query stringlengths 9 9.05k | document stringlengths 10 222k | negatives listlengths 19 20 | metadata dict |
|---|---|---|---|
Get BERT embeddings from a dataloader generator. | def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings | [
"def extract_torch_models_embeddings(dataloader, model, cuda, embedding_size=512):\n # model.eval()\n # embeddings = np.zeros((len(dataloader.dataset), embedding_size))\n #\n # one_embedding = torch.zeros(batch_size, embedding_size, 1, 1)\n #\n # def copy_data(m, i, o):\n # one_embedding.co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Justified (name, value, units, doc) strings for active parameters. | def param_strs(self):
name_len = max(len(p.name) for p in self)
value_len = max(len(p.value_str) for p in self.params.values())
units_len = max(len(p.units) for p in self.params.values())
return [(p.name.ljust(name_len), p.value_str.ljust(value_len),
p.units.ljust(units_len), p.__doc__)
for p in self.params.values() if p] | [
"def __parameters_string(self):\n if self._parameters == list():\n return ''\n\n docstring = \"\"\"\n\nParameters:\n\"\"\"\n \n # Compute maximum length of any parameter name\n maxlen = 0\n for param in self._parameters:\n maxlen = max(maxlen, len(param[0]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Base hash on description string, just like equality operator. | def __hash__(self):
return hash(self.description) | [
"def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))",
"def __hash__(self):\n return hash(self.text)",
"def hash(self, string):\n return self.__scaffydb.hash(string)",
"def __hash__(self):\n return hash(str(self))",
"def hash_string(self):\n return self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert 2D alignment parameters (alpha, sx, sy, mirror) into 3D alignment parameters (phi, theta, psi, s2x, s2y, mirror) | def params_2D_3D(alpha, sx, sy, mirror):
phi = 0
psi = 0
theta = 0
alphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)
if mirror > 0:
phi = (540.0 + phi)%360.0
theta = 180.0 - theta
psi = (540.0 - psi + alphan)%360.0
else:
psi = (psi + alphan)%360.0
return phi, theta, psi, s2x, s2y | [
"def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert 3D alignment parameters (phi, theta, psi, s2x, s2y) there is no mirror in 3D! into 2D alignment parameters (alpha, sx, sy, mirror) | def params_3D_2D(phi, theta, psi, s2x, s2y):
if theta > 90.0:
mirror = 1
alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
else:
mirror = 0
alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
return alpha, sx, sy, mirror | [
"def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Commented by Zhengfan Yang on 05/01/07 I made some change to the original amoeba so that it can now pass out some values calculated by func other than the criteria. This is important in multilevel amoeba refinement because otherwise, upper level refinement will lose the information of lower level refinement. | def amoeba_multi_level(var, scale, func, ftolerance=1.e-4, xtolerance=1.e-4, itmax=500, data=None):
#print " ENTER AMOEBA MULTI LEVEL"
nvar = len(var) # number of variables in the minimization
nsimplex = nvar + 1 # number of vertices in the simplex
# first set up the simplex
simplex = [0]*(nvar+1) # set the initial simplex
simplex[0] = var[:]
for i in xrange(nvar):
simplex[i+1] = var[:]
simplex[i+1][i] += scale[i]
fvalue = []
for i in xrange(nsimplex): # set the function values for the simplex
result, passout = func(simplex[i], data=data)
#print " amoeba setting ",i,simplex[i],result, passout
fvalue.append([result, passout])
# Ooze the simplex to the maximum
iteration = 0
while 1:
# find the index of the best and worst vertices in the simplex
ssworst = 0
ssbest = 0
for i in xrange(nsimplex):
if fvalue[i][0] > fvalue[ssbest][0]:
ssbest = i
if fvalue[i][0] < fvalue[ssworst][0]:
ssworst = i
# get the average of the nsimplex-1 best vertices in the simplex
pavg = [0.0]*nvar
for i in xrange(nsimplex):
if i != ssworst:
for j in range(nvar): pavg[j] += simplex[i][j]
for j in xrange(nvar): pavg[j] = pavg[j]/nvar # nvar is nsimplex-1
simscale = 0.0
for i in range(nvar):
simscale += abs(pavg[i]-simplex[ssworst][i])/scale[i]
simscale = simscale/nvar
# find the range of the function values
fscale = (abs(fvalue[ssbest][0])+abs(fvalue[ssworst][0]))/2.0
if fscale != 0.0:
frange = abs(fvalue[ssbest][0]-fvalue[ssworst][0])/fscale
else:
frange = 0.0 # all the fvalues are zero in this case
# have we converged?
if (((ftolerance <= 0.0 or frange < ftolerance) and # converged to maximum
(xtolerance <= 0.0 or simscale < xtolerance)) or # simplex contracted enough
(itmax and iteration >= itmax)): # ran out of iterations
return simplex[ssbest],fvalue[ssbest][0],iteration,fvalue[ssbest][1]
# reflect the worst vertex
pnew = [0.0]*nvar
for i in xrange(nvar):
pnew[i] = 2.0*pavg[i] - simplex[ssworst][i]
fnew = func(pnew,data=data)
if fnew[0] <= fvalue[ssworst][0]:
# the new vertex is worse than the worst so shrink
# the simplex.
for i in xrange(nsimplex):
if i != ssbest and i != ssworst:
for j in xrange(nvar):
simplex[i][j] = 0.5*simplex[ssbest][j] + 0.5*simplex[i][j]
fvalue[i] = func(simplex[i],data=data)
for j in xrange(nvar):
pnew[j] = 0.5*simplex[ssbest][j] + 0.5*simplex[ssworst][j]
fnew = func(pnew, data=data)
elif fnew[0] >= fvalue[ssbest][0]:
# the new vertex is better than the best so expand
# the simplex.
pnew2 = [0.0]*nvar
for i in xrange(nvar):
pnew2[i] = 3.0*pavg[i] - 2.0*simplex[ssworst][i]
fnew2 = func(pnew2,data=data)
if fnew2[0] > fnew[0]:
# accept the new vertex in the simplexe
pnew = pnew2
fnew = fnew2
# replace the worst vertex with the new vertex
for i in xrange(nvar):
simplex[ssworst][i] = pnew[i]
fvalue[ssworst] = fnew
iteration += 1
#print "Iteration:",iteration," ",ssbest," ",fvalue[ssbest] | [
"def evaluate(self, aSolution):",
"def refine(self):\n\t\t\n # initialise\n self.poor_value = []\n self.poor_nrefl = []\n\n \t\t# create lists of parameters, global and for each grain\n self.globals = [\"a\",\"b\",\"c\",\"alpha\",\"beta\",\"gamma\",\"wx\",\"wy\"]\n for i in rang... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fit the histogram of the input image under mask with the reference image. | def ce_fit(inp_image, ref_image, mask_image):
hist_res = Util.histc(ref_image, inp_image, mask_image)
args = hist_res["args"]
scale = hist_res["scale"]
data = [hist_res['data'], inp_image, hist_res["ref_freq_bin"], mask_image, int(hist_res['size_img']), hist_res['hist_len']]
res = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data)
resu = ["Final Parameter [A,B]:", res[0], "Final Chi-square :", -1*res[1], "Number of Iteration :", res[2]]
corrected_image = inp_image*res[0][0] + res[0][1]
result = [resu,"Corrected Image :",corrected_image]
del data[:], args[:], scale[:]
return result | [
"def calculate_2d_histogram(image, mask, color_base):\n print(\"HOL\")\n if color_base == 'HSV':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n range_hist = [0, 180, 0, 256]\n channels = [0, 1]\n elif color_base == 'LAB':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the position of the commone line in 3D Formula is (RB^T zhat) cross (RA^T zhat) Returns phi, theta of the common line in degrees. theta always < 90 Notice you don't need to enter psi's; they are irrelevant | def common_line_in3D(phiA,thetaA,phiB,thetaB):
from math import pi, sqrt, cos, sin, asin, atan2
piOver=pi/180.0;
ph1 = phiA*piOver;
th1 = thetaA*piOver;
ph2 = phiB*piOver;
th2 = thetaB*piOver;
#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;
#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;
#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);
nx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)
ny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)
nz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)
norm = nx*nx + ny*ny + nz*nz
if norm < 1e-5:
#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB
return 0.0, 0.0
if nz<0: nx=-nx; ny=-ny; nz=-nz;
#thetaCom = asin(nz/sqrt(norm))
phiCom = asin(nz/sqrt(norm))
#phiCom = atan2(ny,nx)
thetaCom = atan2(ny, nx)
return phiCom*180.0/pi , thetaCom*180.0/pi | [
"def theta_finder(theta, point_a, point_b, point_c, point_c_new):\n x, y, z = parametrized_circle(point_a, point_b, point_c, theta)\n residual = (x - point_c_new[0])**2 + (y - point_c_new[1])**2 + (z - point_c_new[2])**2\n return residual",
"def theta_center(zpos, rho, pitch_angle, trap_profile):\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combine 2D alignent parameters including mirror | def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):
t1 = Transform({"type":"2D","alpha":alpha1,"tx":sx1,"ty":sy1,"mirror":mirror1,"scale":1.0})
t2 = Transform({"type":"2D","alpha":alpha2,"tx":sx2,"ty":sy2,"mirror":mirror2,"scale":1.0})
tt = t2*t1
d = tt.get_params("2D")
return d[ "alpha" ], d[ "tx" ], d[ "ty" ], d[ "mirror" ] | [
"def _align_by_DAPI(data_1, data_2, channel_index=0, upsample_factor=2):\n images = data_1[channel_index], data_2[channel_index]\n _, offset = ops.process.Align.calculate_offsets(images, upsample_factor=upsample_factor)\n offsets = [offset] * len(data_2)\n aligned = ops.process.Align.app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a text file that is composed of columns of numbers into spider doc file | def create_spider_doc(fname,spiderdoc):
from string import atoi,atof
infile = open(fname,"r")
lines = infile.readlines()
infile.close()
nmc = len(lines[0].split())
table=[]
for line in lines:
data = line.split()
for i in xrange(0,nmc):
data[i] = atof(data[i])
table.append(data)
drop_spider_doc(spiderdoc ,table) | [
"def parse_docx_file(path, name):\n document = open_file(path)\n i = 0\n with open(name, 'w', encoding='utf-8') as file:\n fieldnames = ['word', 'meaning']\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n for table in document.tables:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Output the data in slice iz, row ix of an image to standard out. | def dump_row(input, fname, ix=0, iz=0):
fout = open(fname, "w")
image=get_image(input)
nx = image.get_xsize()
ny = image.get_ysize()
nz = image.get_zsize()
fout.write("# z = %d slice, x = %d row)\n" % (iz, ix))
line = []
for iy in xrange(ny):
fout.write("%d\t%12.5g\n" % (iy, image.get_value_at(ix,iy,iz)))
fout.close() | [
"def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 ==... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list of Euler angles suitable for projections. method is either 'S' for Saff algorithm or 'P' for Penczek '94 algorithm 'S' assumes phi1> delta ; symmetry if this is set to pointgroup symmetry (cn or dn) or helical symmetry with pointgroup symmetry (scn or sdn), it will yield angles from the asymmetric unit, not the specified range; | def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = "Minus", symmetry='c1'):
from math import pi, sqrt, cos, acos, tan, sin
from utilities import even_angles_cd
from string import lower,split
angles = []
symmetryLower = symmetry.lower()
symmetry_string = split(symmetry)[0]
if (symmetry_string[0] == "c"):
if(phi2 == 359.99):
angles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi)
if(int(symmetry_string[1:]) > 1):
if( int(symmetry_string[1:])%2 ==0):
qt = 360.0/int(symmetry_string[1:])
else:
qt = 180.0/int(symmetry_string[1:])
n = len(angles)
for i in xrange(n):
t = n-i-1
if(angles[t][1] == 90.0):
if(angles[t][0] >= qt): del angles[t]
else:
angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)
elif(symmetry_string[0] == "d"):
if(phi2 == 359.99):
angles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)
if (int(symmetry_string[1:])%2 == 0):
qt = 360.0/2/int(symmetry_string[1:])
else:
qt = 180.0/2/int(symmetry_string[1:])
n = len(angles)
for i in xrange(n):
t = n-i-1
if(angles[t][1] == 90.0):
if(angles[t][0] >= qt): del angles[t]
else:
angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)
elif(symmetry_string[0] == "s"):
#if symetry is "s", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2
# for helical, theta1 cannot be 0.0
if theta1 > 90.0:
ERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)
if theta1 == 0.0: theta1 =90.0
theta_number = int((90.0 - theta1)/theta2)
#for helical, symmetry = s or scn
cn = int(symmetry_string[2:])
for j in xrange(theta_number,-1, -1):
if( j == 0):
if (symmetry_string[1] =="c"):
if cn%2 == 0:
k=int(359.99/cn/delta)
else:
k=int(359.99/2/cn/delta)
elif (symmetry_string[1] =="d"):
if cn%2 == 0:
k=int(359.99/2/cn/delta)
else:
k=int(359.99/4/cn/delta)
else:
ERROR("For helical strucutre, we only support scn and sdn symmetry","even_angles",1)
else:
if (symmetry_string[1] =="c"):
k=int(359.99/cn/delta)
elif (symmetry_string[1] =="d"):
k=int(359.99/2/cn/delta)
for i in xrange(k+1):
angles.append([i*delta,90.0-j*theta2,90.0])
else : # This is very close to the Saff even_angles routine on the asymmetric unit;
# the only parameters used are symmetry and delta
# The formulae are given in the Transform Class Paper
# The symmetric unit nVec=[]; # x,y,z triples
# is defined by three points b,c, v of Fig 2 of the paper
# b is (0,0,1)
# c is (sin(thetac),0,cos(thetac))
# a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))
# f is the normalized sum of all 3
# The possible symmetries are in list_syms
# The symmetry determines thetac and Omega
# The spherical area is Omega - pi/3;
# should be equal to 4 *pi/(3*# Faces)
#
# symmetry ='tet'; delta = 6;
scrunch = 0.9 # closeness factor to eliminate oversampling corners
#nVec=[] # x,y,z triples
piOver = pi/180.0
Count=0 # used to count the number of angles
if (symmetryLower[0:3] =="tet"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps
elif (symmetryLower[0:3] =="oct"): m=4.0; fudge=0.8
elif (symmetryLower[0:3] =="ico"): m=5.0; fudge=0.95
else: ERROR("allowable symmetries are cn, dn, tet, oct, icos","even_angles",1)
n=3.0
OmegaR = 2.0*pi/m; cosOmega= cos(OmegaR)
Edges = 2.0*m*n/(2.0*(m+n)-m*n)
Faces = 2*Edges/n
Area = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega
costhetac = cosOmega/(1-cosOmega)
deltaRad= delta*pi/180
NumPoints = int(Area/(deltaRad*deltaRad))
fheight = 1/sqrt(3)/ (tan(OmegaR/2.0))
z0 = costhetac # initialize loop
z = z0
phi = 0
Deltaz = (1-costhetac)/(NumPoints-1)
#[1, phi,180.0*acos(z)/pi,0.]
anglesLast = [phi,180.0*acos(z)/pi,0.]
angles.append(anglesLast)
nLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]
nVec = []
nVec.append(nLast)
Count +=1
for k in xrange(1,(NumPoints-1)):
z=z0 + Deltaz*k # Is it higher than fhat or lower
r= sqrt(1-z*z)
if (z > fheight): phiRmax= OmegaR/2.0
if (z<= fheight):
thetaR = acos(z);
cosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega);
phiMax = 180.0*( OmegaR - acos(cosStuff))/pi
angleJump = fudge* delta/r
phi = (phi + angleJump)%(phiMax)
anglesNew = [phi,180.0*acos(z)/pi,0.];
nNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]
diffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)]
diffMin = min(diffangleVec)
if (diffMin>angleJump*piOver *scrunch):
Count +=1
angles.append(anglesNew)
nVec.append(nNew)
#[Count, phi,180*acos(z)/pi,0.]
anglesLast = anglesNew
nLast=nNew
angles.append( [0.0, 0.0, 0.0] )
nLast= [ 0., 0. , 1.]
nVec.append(nLast)
if(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] )
angles.reverse()
if(phiEqpsi == "Minus"):
for i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0
#print(Count,NumPoints)
# look at the distribution
# Count =len(angles); piOver= pi/180.0;
# phiVec = [ angles[k][0] for k in range(Count)] ;
# thetaVec = [ angles[k][1] for k in range(Count)] ;
# xVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ]
# yVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ]
# zVec = [cos(piOver * angles[k][1]) for k in range(Count) ]
# pylab.plot(yVec,zVec,'.'); pylab.show()
return angles | [
"def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):\n\tfrom math import pi, sqrt, cos, acos\n\tangles = []\n\tif (method == 'P'):\n\t\ttemp = Util.even_angles(delta, theta1, theta2, phi1, phi2)\n\t\t#\t\t phi, theta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list of Euler angles suitable for projections. method is either 'S' for Saff algorithm or 'P' for Penczek '94 algorithm 'S' assumes phi1> delta ; phiEQpsi set this to 'Minus', if you want psi=phi; | def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):
from math import pi, sqrt, cos, acos
angles = []
if (method == 'P'):
temp = Util.even_angles(delta, theta1, theta2, phi1, phi2)
# phi, theta, psi
for i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);
else: #elif (method == 'S'):
Deltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)
s = delta*pi/180.0
NFactor = 3.6/s
wedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)
NumPoints = int(NFactor*NFactor*wedgeFactor)
angles.append([phi1, theta1, 0.0])
z1 = cos(theta1*pi/180.0); phi=phi1 # initialize loop
for k in xrange(1,(NumPoints-1)):
z=z1 + Deltaz*k/(NumPoints-1)
r= sqrt(1-z*z)
phi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1))
#[k, phi,180*acos(z)/pi, 0]
angles.append([phi, 180*acos(z)/pi, 0.0])
#angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07
if (phiEQpsi == 'Minus'):
for k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0
if( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )
return angles | [
"def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = \"Minus\", symmetry='c1'):\n\n\tfrom math import pi, sqrt, cos, acos, tan, sin\n\tfrom utilities import even_angles_cd\n\tfrom string import lower,split\n\tangles = []\n\tsymmetryLower = symmetry.lower()\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the in_plane angle from two images and output the crosss correlation value The function won't destroy input two images This is the angle that rotates the first image, ima, into the second image, ref. The sense of the rotation is clockwise. center=1 means image is first centered, then rotation angle is found | def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):
from alignment import Numrinit, ringwe, Applyws, ormq
from filter import fshift
first_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)
nx=ima.get_xsize()
if(last_ring == -1): last_ring=int(nx/2)-2
cnx = int(nx/2)+1
cny = cnx
mode = "F"
#precalculate rings
numr = Numrinit(first_ring, last_ring, rstep, mode)
wr = ringwe(numr, mode)
if(center==1):
cs = [0.0]*2 # additio
cs = ref.phase_cog()
ref1 = fshift(ref, -cs[0], -cs[1])
cimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)
cs = ima.phase_cog()
ima1 = fshift(ima, -cs[0], -cs[1])
else:
ima1=ima.copy()
cimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)
Util.Frngs(cimage, numr)
Applyws(cimage, numr, wr)
[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)
return angt,sxst, syst, mirrort, peakt | [
"def calculate_translation(reference_im:np.ndarray, \n target_im:np.ndarray,\n ref_to_tar_rotation:np.ndarray=None,\n use_autocorr:bool=True,\n alignment_kwargs:dict={},\n verbose:bool=True,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an image created from a text file. The first line of the image should contain "nx ny nz" (separated by whitespace) All subsequent lines contain "ix iy iz val", where ix, iy, and iz are the integer x, y, and z coordinates of the point and val is the floating point value of that point. All points not explicitly listed are set to zero. | def get_textimage(fname):
from string import atoi,atof
infile = open(fname)
lines = infile.readlines()
infile.close()
data = lines[0].split()
nx = atoi(data[0])
ny = atoi(data[1])
nz = atoi(data[2])
e = EMData()
e.set_size(nx, ny, nz)
e.to_zero()
for line in lines[1:]:
data = line.split()
ix = atoi(data[0])
iy = atoi(data[1])
iz = atoi(data[2])
val = atof(data[3])
e[ix,iy,iz] = val
return e | [
"def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z",
"def read_2d_analysis_da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list of available symmetries | def list_syms():
SymStringVec=[];
SymStringVec.append("CSYM");
SymStringVec.append("DSYM");
SymStringVec.append("TET_SYM");
SymStringVec.append("OCT_SYM");
SymStringVec.append("ICOS_SYM");
SymStringVec.append("ISYM");
return SymStringVec | [
"def available_symbologies():\n consts = [d[8:] for d in dir(zint) if d.startswith('BARCODE_')]\n\n return [d for d in consts if d not in IGNORE_ZINT_CONSTS]",
"async def _load_supported_symbols() -> List[Symbol]:\n return []",
"def __set_symbol_dict(self):\r\n return {0: list(alph) if self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a centered square (or cube) with edge length of d. | def model_square(d, nx, ny, nz=1):
e = EMData()
e.set_size(nx, ny, nz)
e.process_inplace("testimage.squarecube", {"edge_length":d, "fill":1})
return e | [
"def square_diamond(sx, sy, size, strong):\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a Spider filename string and insert parameters. | def parse_spider_fname(mystr, *fieldvals):
# helper functions and classes
def rm_stack_char(mystr):
"Helper function to remove a stack character if it exists"
stackloc = mystr.find("@")
if stackloc != -1:
# there's an '@' somewhere
if len(mystr) - 1 == stackloc:
# It's at the end of the string
return mystr[:-1]
else:
# '@' not at the end, so it's an error
raise ValueError, "Invalid format: misplaced '@'."
else:
# no '@' at all
return mystr
class Fieldloc:
"Helper class to store description of a field"
def __init__(self, begin, end):
self.begin = begin
self.end = end
def count(self):
"Size of the field (including braces)"
return self.end - self.begin + 1
def find_fields(mystr):
"Helper function to identify and validate fields in a string"
fields = []
loc = 0
while True:
begin = mystr.find('{', loc)
if begin == -1: break
end = mystr.find('}', begin)
field = Fieldloc(begin, end)
# check validity
asterisks = mystr[begin+1:end]
if asterisks.strip("*") != "":
raise ValueError, "Malformed {*...*} field: %s" % \
mystr[begin:end+1]
fields.append(Fieldloc(begin, end))
loc = end
return fields
# remove leading whitespace
mystr.strip()
# remove stack character (if it exists)
mystr = rm_stack_char(mystr)
# locate fields to replace
fields = find_fields(mystr)
if len(fields) != len(fieldvals):
# wrong number of fields?
raise ValueError, "Number of field values provided differs from" \
"the number of {*...*} fields."
newstrfrags = []
loc = 0
for i, field in enumerate(fields):
# text before the field
newstrfrags.append(mystr[loc:field.begin])
# replace the field with the field value
fieldsize = field.count() - 2
fielddesc = "%0" + str(fieldsize) + "d"
newstrfrags.append(fielddesc % fieldvals[i])
loc = field.end + 1
newstrfrags.append(mystr[loc:])
return "".join(newstrfrags) | [
"def parse_params(self, fn=None):\n return parse_filename(str(self) if fn is None else fn, conventions=self.conventions, postprocessor=self.postprocessor)",
"def _define_params_from_string(self, params_string):\n params_list = params_string.split()\n self.token_name = params_list[0]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the data in slice iz, row ix of an image to standard out. | def print_row(input, ix=0, iz=0):
image=get_image(input)
nx = image.get_xsize()
ny = image.get_ysize()
nz = image.get_zsize()
print "(z = %d slice, x = %d row)" % (iz, ix)
line = []
for iy in xrange(ny):
line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
if ((iy + 1) % 5 == 0): line.append("\n ")
line.append("\n")
print "".join(line) | [
"def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the data in slice iz, column iy of an image to standard out. | def print_col(input, iy=0, iz=0):
image=get_image(input)
nx = image.get_xsize()
ny = image.get_ysize()
nz = image.get_zsize()
print "(z = %d slice, y = %d col)" % (iz, iy)
line = []
for ix in xrange(nx):
line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
if ((ix + 1) % 5 == 0): line.append("\n ")
line.append("\n")
print "".join(line) | [
"def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the data in slice iz of an image to standard out. | def print_slice(input, iz=0):
image=get_image(input)
nx = image.get_xsize()
ny = image.get_ysize()
nz = image.get_zsize()
print "(z = %d slice)" % (iz)
line = []
for iy in xrange(ny):
line.append("Row ")
line.append("%4i " % iy)
for ix in xrange(nx):
line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
if ((ix + 1) % 5 == 0):
line.append("\n ")
line.append(" ")
line.append("\n")
if(nx%5 != 0): line.append("\n")
print "".join(line) | [
"def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the data in slice iz of an image to standard out in a format that agrees with v2 | def print_image_slice(input, iz=0):
image=get_image(input)
nx = image.get_xsize()
ny = image.get_ysize()
nz = image.get_zsize()
print "(z = %d slice)" % (iz)
line = []
for iy in xrange(ny-1,-1,-1):
line.append("Row ")
line.append("%4i " % iy)
for ix in xrange(nx):
line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
if ((ix + 1) % 5 == 0):
line.append("\n ")
line.append(" ")
line.append("\n")
if(nx%5 != 0): line.append("\n")
print "".join(line) | [
"def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read data from text file, if ncol = 1, read all columns if ncol >= 0, just read the (ncol+1)th column. | def read_text_file(file_name, ncol = 0):
from string import split
inf = file(file_name, "r")
line = inf.readline()
data = []
while len(line) > 0:
if ncol == -1:
vdata = split(line)
if data == []:
for i in xrange(len(vdata)):
data.append([float(vdata[i])])
else:
for i in xrange(len(vdata)):
data[i].append(float(vdata[i]))
else:
vdata = float(split(line)[ncol])
data.append(vdata)
line = inf.readline()
return data | [
"def readOFColumnData(dataFile,nCol):\n fileCheck(dataFile) # does the file exists ? Stop if not.\n #\n # Init list\n data = []\n #\n for line in fileinput.input(dataFile):\n # remove parenthesis if any\n line = line.replace('(', '')\n line = line.replace(')', '') \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
linearly interpolate a 1D power spectrum to required length with required Pixel size input_object a 1D list with a 1D curve to be interpolated length_current half size of the image size (in case of power spectrum, it can be different from the length of the input_object) length_interpolated length of the interpolated 1D curve Pixel_size_current pixel size of the input 1D list Pixel_size_interpolated pixel size of the target 1D list One can either input the two lengths or two respective pixel sizes | def reshape_1d(input_object, length_current=0, length_interpolated=0, Pixel_size_current = 0., Pixel_size_interpolated = 0.):
interpolated = []
if length_current == 0: length_current = len(input_object)
lt = len(input_object) - 2
if length_interpolated == 0:
if( Pixel_size_interpolated != Pixel_size_current):
length_interpolated = int(length_current*Pixel_size_current/Pixel_size_interpolated + 0.5)
else:
ERROR("Incorrect input parameters","reshape_1d",1)
return []
if Pixel_size_current == 0.:
Pixel_size_current = 1.
Pixel_size_interpolated = Pixel_size_current*float(length_current)/float(length_interpolated)
qt =Pixel_size_interpolated/Pixel_size_current
for i in xrange(length_interpolated):
xi = float(i)*qt
ix = min(int(xi),lt)
df = xi -ix
xval = (1.0-df)*input_object[ix] + df*input_object[ix+1]
interpolated.append(xval)
return interpolated | [
"def interpolation(self):\n def find_min_delta_omega(oemga:ndarray):\n \"\"\"计算频率间隔的最小值\n Parameters\n ----------\n oemga : ndarray\n 频率数组\n\n Returns\n -------\n float \n min_delta_omega\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gather the a list of EMData on all nodes to the main node, we assume the list has the same length on each node. | def gather_EMData(data, number_of_proc, myid, main_node):
from mpi import MPI_COMM_WORLD, MPI_INT, MPI_TAG_UB
from mpi import mpi_send, mpi_recv
l = len(data)
gathered_data = []
inc = 1 # A temp measure
if myid == main_node:
for i in xrange(0, number_of_proc*inc, inc):
if i == main_node:
for k in xrange(l):
gathered_data.append(data[k])
else:
for k in xrange(l):
im = recv_EMData(i, i*l+k)
mem_len = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
members = mpi_recv(int(mem_len[0]), MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
members = map(int, members)
im.set_attr('members', members)
gathered_data.append(im)
else:
for k in xrange(l):
send_EMData(data[k], main_node, myid*l+k)
mem = data[k].get_attr('members')
mpi_send(len(mem), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
mpi_send(mem, len(mem), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
return gathered_data | [
"def items(self):\n # Create an empty list of results\n result = [] # Constant time to create a new list\n # Start at the head node\n node = self.head # Constant time to assign a variable reference\n # Loop until the node is None, which is one node too far past the tail\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
write headers from files in data into a disk file called filename. The filename has to be either hdf or bdb. lima list with positions in the disk files into which headers will be written, i.e., header from data[k] will be written into file number lima[k] | def write_headers(filename, data, lima):
from utilities import file_type
from EMAN2db import db_open_dict
ftp = file_type(filename)
if ftp == "bdb":
# For unknown reasons this does not work on Linux, but works on Mac ??? Really?
DB = db_open_dict(filename)
for i in range(len(lima)):
DB.set_header(lima[i], data[i])
DB.close()
#for i in range(len(lima)):
# data[i].write_image(filename, lima[i])
elif ftp == "hdf":
for i in range(len(lima)):
data[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)
else:
ERROR("Unacceptable file format","write_headers",1) | [
"def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HD... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
write header from a single file data into a disk file called filename. The filename has to be either hdf or bdb. lima position in the disk files into which header will be written, i.e., header from data will be written into file number lima | def write_header(filename, data, lima):
from utilities import file_type
from EMAN2db import db_open_dict
ftp = file_type(filename)
if ftp == "bdb":
DB = db_open_dict(filename)
DB.set_header(lima, data)
elif ftp == "hdf":
data.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)
else:
ERROR("Unacceptable file format","write_headers",1) | [
"def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
retrieve 3D alignment parameters from the header phi theta psi tx ty tz mirror scale | def get_params3D(ima, xform = "xform.align3d"):
t = ima.get_attr(xform)
d = t.get_params("spider")
return d["phi"],d["theta"],d["psi"],d["tx"],d["ty"],d["tz"],d["mirror"],d["scale"] | [
"def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set 3D alignment parameters in the header phi theta psi tx ty tz mirror scale | def set_params3D(ima, p, xform = "xform.align3d"):
t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2],"tx":p[3],"ty":p[4],"tz":p[5],"mirror":p[6],"scale":p[7]})
ima.set_attr(xform, t) | [
"def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]",
"def setCameraRotation3D(ang):\n dislin.vup3d(ang)",
"def params_3D_2D(phi, theta, psi, s2x, s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
retrieve projection alignment parameters from the header phi theta psi s2x s2y | def get_params_proj(ima, xform = "xform.projection"):
t = ima.get_attr(xform)
d = t.get_params("spider")
return d["phi"],d["theta"],d["psi"],-d["tx"],-d["ty"] | [
"def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set projection alignment parameters in the header phi theta psi s2x s2y | def set_params_proj(ima, p, xform = "xform.projection"):
from EMAN2 import Vec2f
t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2]})
t.set_trans(Vec2f(-p[3], -p[4]))
ima.set_attr(xform, t) | [
"def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
recover numerical values of CTF parameters from EMAN2 CTF object stored in a header of the input image | def get_ctf(ima):
from EMAN2 import EMAN2Ctf
ctf_params = ima.get_attr("ctf")
return ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang | [
"def extract_feature(fileName,pca_params,n1,n2):\n # Get kernel and bais\n kernel0 = np.array(pca_params['Layer_0/kernel'])\n kernel1 = np.array(pca_params['Layer_1/kernel'])\n bias1 = pca_params['Layer_1/bias'].astype(np.float32)\n # print(bias1)\n # print('kernel0 shape: ',kernel0.shape)\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
generate EMAN2 CTF object using values of CTF parameters given in the list p | def generate_ctf(p):
from EMAN2 import EMAN2Ctf
defocus = p[0]
cs = p[1]
voltage = p[2]
pixel_size = p[3]
bfactor = p[4]
amp_contrast = p[5]
if defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention
defocus *= 1e-4
if amp_contrast < 1.0:
from math import sqrt
amp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)
ctf = EMAN2Ctf()
if(len(p) == 6):
ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast})
else:
ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast,'dfdiff':p[6],'dfang':p[7]})
return ctf | [
"def from_thermo(T_C, p):\n\ty = y_from_p(p)\n\tx = x_from_Tp(T_C+C_to_K, p)\n\treturn x, y",
"def target_distribution_gen(name, parameter1, parameter2):\n if name==\"CHSH\":\n v = parameter2\n p = np.array([\n (-2 + np.sqrt(2) + (-1 + np.sqrt(2))*v)/(16.*(-2 + np.sqrt(2))),(2 + np.sqrt(2)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set EMAN2 CTF object in the header of input image using values of CTF parameters given in the list p | def set_ctf(ima, p):
from utilities import generate_ctf
ctf = generate_ctf( p )
ima.set_attr( "ctf", ctf ) | [
"def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find all occurences of val on list lo Returns a list of indices of val on lo. | def findall(lo,val):
u = []
i = -1
while( i < len(lo)-1):
try:
i = lo.index(val,i+1)
u.append(i)
except:
i += 1
return u | [
"def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]",
"def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find overall 3D rotation (phi theta psi) between two sets of Eulerian angles. The two sets have to be of the same length and it is assume that k'th element on the first list corresponds to the k'th element on the second list. | def rotation_between_anglesets(agls1, agls2):
from math import sin, cos, pi, sqrt, atan2, acos, atan
from numpy import array, linalg, matrix
import types
deg2rad = pi/180.0
def ori2xyz(ori):
if(type(ori) == types.ListType):
phi, theta, psi = ori[:3]
else:
# it has to be Transformation object
d = ori.get_params("spider")
phi = d["phi"]
theta = d["theta"]
psi = d["psi"]
"""
# This makes no sense here! PAP 09/2011
if theta > 90.0:
phi += 180.0
theta = 180.0-theta
"""
phi *= deg2rad
theta *= deg2rad
x = sin(theta) * sin(phi)
y = sin(theta) * cos(phi)
z = cos(theta)
return [x, y, z]
N = len(agls1)
if N != len(agls2):
print 'Both lists must have the same length'
return -1
if N < 2:
print 'At least two orientations are required in each list'
return -1
U1, U2 = [], []
for n in xrange(N):
p1 = ori2xyz(agls1[n])
p2 = ori2xyz(agls2[n])
U1.append(p1)
U2.append(p2)
# compute all Suv with uv = {xx, xy, xz, yx, ..., zz}
Suv = [0] * 9
c = 0
nbori = len(U1)
for i in xrange(3):
for j in xrange(3):
for s in xrange(nbori):
Suv[c] += (U2[s][i] * U1[s][j])
c += 1
# create matrix N
N = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]],
[Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]],
[Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]],
[Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]])
# eigenvector corresponding to the most positive eigenvalue
val, vec = linalg.eig(N)
q0, qx, qy, qz = vec[:, val.argmax()]
# create quaternion Rot matrix
r = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0,
2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0,
2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0]
R = Transform(r)
dictR = R.get_rotation('SPIDER')
return dictR['phi'], dictR['theta'], dictR['psi'] | [
"def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_A... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve pixel size from the header. We check attribute Pixel_size and also pixel size from ctf object, if exisits. If the two are different or if the pixel size is not set, return 1.0 and print a warning. | def get_pixel_size(img):
p1 = img.get_attr_default("apix_x", -1.0)
cc = img.get_attr_default("ctf", None)
if cc == None:
p2 = -1.0
else:
p2 = round(cc.apix, 3)
if p1 == -1.0 and p2 == -1.0:
ERROR("Pixel size not set", "get_pixel_size", 0)
return -1.0
elif p1 > -1.0 and p2 > -1.0:
if abs(p1-p2) >= 0.001:
ERROR("Conflict between pixel size in attribute and in ctf object", "get_pixel_size", 0)
# pixel size is positive, so what follows omits -1 problem
return max(p1, p2)
else:
return max(p1, p2) | [
"def get_image_size(self):",
"def GetSizeCX(self):\n ...",
"def getSizePix(self):\r\n if 'sizePix' in self.currentCalib:\r\n return self.currentCalib['sizePix']\r\n else:\r\n return None",
"def get_size(self):\n if self.file_meta[:2] == b'bp':\n fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For the given grouping, convert ROOT files into DataFrames merging groups together. Return a dictionary mapping file names to DataFrames. | def process_group(directory: str, files: dict, channel: str, year: str) -> dict:
if len(files) == 0:
raise Exception('empty file list for directory {}'.format(directory)) + 1
dataframes = {}
for name, ifile in files.items():
# equivalent of hadding
update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree')
current_dfs = []
for update_df in update_dfs:
update_df.fillna(-999, inplace=True)
current_dfs.append(update_df)
if len(current_dfs) > 0:
dataframes[name] = pd.concat(current_dfs)
dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]})
return dataframes | [
"def _load_group_data(directory='', file_name='', df=True):\n\n # check if folder exists with experiment name\n if os.path.isdir(directory) is False:\n print 'making new directory to save data'\n os.mkdir(directory)\n \n # all files in directory\n files = os.listdir(directory)\n\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Guess an appropriate chunk layout for an array, given its shape and the size of each element in bytes. Will allocate chunks only as large as MAX_SIZE. Chunks are generally close to some powerof2 fraction of each axis, slightly favoring bigger values for the last index. Undocumented and subject to change without warning. | def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
ndims = len(shape)
# require chunks to have non-zero length for all dimensions
chunks = np.maximum(np.array(shape, dtype="=f8"), 1)
# Determine the optimal chunk size in bytes using a PyTables expression.
# This is kept as a float.
dset_size = np.prod(chunks) * typesize
target_size = CHUNK_BASE * (2 ** np.log10(dset_size / (1024.0 * 1024)))
if target_size > CHUNK_MAX:
target_size = CHUNK_MAX
elif target_size < CHUNK_MIN:
target_size = CHUNK_MIN
idx = 0
while True:
# Repeatedly loop over the axes, dividing them by 2. Stop when:
# 1a. We're smaller than the target chunk size, OR
# 1b. We're within 50% of the target chunk size, AND
# 2. The chunk is smaller than the maximum chunk size
chunk_bytes = np.prod(chunks) * typesize
if (
chunk_bytes < target_size or abs(chunk_bytes - target_size) / target_size < 0.5
) and chunk_bytes < CHUNK_MAX:
break
if np.prod(chunks) == 1:
break # Element size larger than CHUNK_MAX
chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0)
idx += 1
return tuple(int(x) for x in chunks) | [
"def expandChunk(layout, typesize, shape_json, chunk_min=CHUNK_MIN, layout_class='H5D_CHUNKED'):\n if shape_json is None or shape_json[\"class\"] == 'H5S_NULL':\n return None\n if shape_json[\"class\"] == 'H5S_SCALAR':\n return (1,) # just enough to store one item\n\n layout = list(layout)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convenience function to normalize the `chunks` argument for an array with the given `shape`. | def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
# N.B., expect shape already normalized
# handle auto-chunking
if chunks is None or chunks is True:
return guess_chunks(shape, typesize)
# handle no chunking
if chunks is False:
return shape
# handle 1D convenience form
if isinstance(chunks, numbers.Integral):
chunks = tuple(int(chunks) for _ in shape)
# handle bad dimensionality
if len(chunks) > len(shape):
raise ValueError("too many dimensions in chunks")
# handle underspecified chunks
if len(chunks) < len(shape):
# assume chunks across remaining dimensions
chunks += shape[len(chunks) :]
# handle None or -1 in chunks
if -1 in chunks or None in chunks:
chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))
chunks = tuple(int(c) for c in chunks)
return chunks | [
"def normalize(shape):\n s = shape\n matrix = Shape.get_matrix(s.get_vector())\n norm_x = math.sqrt(sum(matrix[:, 0] ** 2))\n norm_y = math.sqrt(sum(matrix[:, 1] ** 2))\n for pt in s.pts:\n pt.x /= norm_x\n pt.y /= norm_y\n return s",
"def normalize(arr, axis=1):\n _arr = arr if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine whether `item` specifies a complete slice of array with the given `shape`. Used to optimize __setitem__ operations on the Chunk class. | def is_total_slice(item, shape: Tuple[int]) -> bool:
# N.B., assume shape is normalized
if item == Ellipsis:
return True
if item == slice(None):
return True
if isinstance(item, slice):
item = (item,)
if isinstance(item, tuple):
return all(
(
isinstance(it, slice)
and ((it == slice(None)) or ((it.stop - it.start == sh) and (it.step in [1, None])))
)
for it, sh in zip(item, shape)
)
else:
raise TypeError("expected slice or tuple of slices, found %r" % item) | [
"def _is_total_slice(item, shape):\n\n # N.B., assume shape is normalized\n\n if item == Ellipsis:\n return True\n if item == slice(None):\n return True\n if isinstance(item, slice):\n item = item,\n if isinstance(item, tuple):\n return all(\n (isinstance(s, sli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if all the elements of an array are equivalent to a value. If `value` is None, then this function does not do any comparison and returns False. | def all_equal(value: Any, array: Any):
if value is None:
return False
if not value:
# if `value` is falsey, then just 1 truthy value in `array`
# is sufficient to return False. We assume here that np.any is
# optimized to return on the first truthy value in `array`.
try:
return not np.any(array)
except (TypeError, ValueError): # pragma: no cover
pass
if np.issubdtype(array.dtype, np.object_):
# we have to flatten the result of np.equal to handle outputs like
# [np.array([True,True]), True, True]
return all(flatten(np.equal(value, array, dtype=array.dtype)))
else:
# Numpy errors if you call np.isnan on custom dtypes, so ensure
# we are working with floats before calling isnan
if np.issubdtype(array.dtype, np.floating) and np.isnan(value):
return np.all(np.isnan(array))
else:
# using == raises warnings from numpy deprecated pattern, but
# using np.equal() raises type errors for structured dtypes...
return np.all(value == array) | [
"def all_equal(array):\n if not array:\n raise ValueError(\"Array is empty\")\n\n first_item = array[0]\n\n if any(item != first_item for item in array):\n return False\n\n return True",
"def has_equal_values_vec(x):\n return jnp.all(x == x[0])",
"def check_array(self, array: ArrayD... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convenience function to coerce `buf` to ndarraylike array or bytes. First check if `buf` can be zerocopy converted to a contiguous array. If not, `buf` will be copied to a newly allocated `bytes` object. | def ensure_contiguous_ndarray_or_bytes(buf) -> Union[NDArrayLike, bytes]:
try:
return ensure_contiguous_ndarray_like(buf)
except TypeError:
# An error is raised if `buf` couldn't be zero-copy converted
return ensure_bytes(buf) | [
"def buffer_to_bytes(buf):\n if not isinstance(buf, bytes):\n buf = bytes(buf)\n return buf",
"def _byte_buffer(cls, data: np.ndarray) -> np.ndarray:\n view = data.view()\n view.shape = (data.size,) # Reshape while disallowing copy\n return view.view(np.uint8)",
"def test_arr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read and preprocess an image with data augmentation (random transform). | def read_for_training(p, augmentation=False):
img = imread(TRAIN + p, mode='RGB')
msk = img
if mode == 'background':
data = {'image': img}
elif mode == 'instance' or mode == 'code':
msk = imread(TRAIN_MASK + p.replace('.jpg', '.png'))
data = {'image': img, 'mask': msk}
if augmentation:
data_aug = strong_aug()(**data)
img = data_aug['image']
if 'mask' in data_aug:
msk = data_aug['mask']
if mode == 'instance' or mode == 'code':
img[~msk.astype(np.bool)] = 0
img, msk = size_normalization(img, msk)
if mode == 'code':
img = encode(img, msk)
return img, msk | [
"def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_",
"def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Amount which will increase x until it's divisible evenly by 64 | def padlen_64(x: int):
return (64 - (x % 64)) % 64 | [
"def bulk_modulus():\n\n return 10000.0",
"def mod_5(x):\r\n return x%5",
"def foo4(x):\n\tresult = 1\n\tfor i in range(1, x +1):\n\t\tresult = result * i\n\treturn result",
"def mod_5(x):\n return x % 5",
"def enlarge(n):\n\n return n* 100",
"def euler52():\n\tx = 0\n\twhile True:\n\t\tx... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Turns this userreadable string into an Alternative (no escaping) | def from_str(cls, encstr: str) -> 'Alternative':
encstr = re.sub(r'\s+', '', encstr)
return cls(*re.split('([' + string.punctuation + '])', encstr, maxsplit=1)) | [
"def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")",
"def read_alt(self, alt_string):\n match = self.alt_pattern.match(alt_string)\n if not match:\n raise SyntaxError(\"One of the ALT lines is malformed: %s\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pull a Restriction from encoded string, return remainder | def decode(cls, encstr: str) -> Tuple['Restriction', str]:
alts = []
while len(encstr) != 0:
if encstr.startswith('&'):
encstr = encstr[1:]
break
alt, encstr = Alternative.decode(encstr)
alts.append(alt)
return cls(alts), encstr | [
"def from_str(cls, encstr: str) -> 'Restriction':\n encstr = re.sub(r'\\s+', '', encstr)\n ret, remainder = cls.decode(encstr)\n if len(remainder) != 0:\n raise ValueError(\"Restriction had extrs characters at end: {}\"\n .format(remainder))\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a Restriction from an escaped string (ignoring whitespace) | def from_str(cls, encstr: str) -> 'Restriction':
encstr = re.sub(r'\s+', '', encstr)
ret, remainder = cls.decode(encstr)
if len(remainder) != 0:
raise ValueError("Restriction had extrs characters at end: {}"
.format(remainder))
return ret | [
"def format(self, y):\n try:\n if isinstance(y, RestrictionType):\n return y\n elif isinstance(eval(str(y)), RestrictionType):\n return eval(y)\n except (NameError, SyntaxError):\n pass\n raise ValueError(\"%s is not a RestrictionTy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the restrictions against the values dict given. Normally values are treated strings, but conditions only work if they're actually integers. Returns (True, '') if everything is good. Otherwise, returns (False, reasonstring) | def are_restrictions_met(self, values: Dict[str, Any]) -> Tuple[bool, str]:,
for r in self.restrictions:
reasons = r.test(values)
if reasons is not None:
return False, reasons
return True, '' | [
"def check_condition(cond_list, values_dict):\n key = cond_list[0]\n compare_str = cond_list[1]\n number = int(cond_list[2])\n\n if key not in values_dict:\n values_dict[key] = 0\n if compare_str == \">\":\n return values_dict[key] > number\n elif compare_str == \"<\":\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Allinone check that a runestring is valid, derives from this MasterRune and passes all its conditions against the given dictionary of values or callables | def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
try:
rune = Rune.from_base64(b64str)
except: # noqa: E722
return False, "runestring invalid"
if not self.is_rune_authorized(rune):
return False, "rune authcode invalid"
return rune.are_restrictions_met(values) | [
"def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)",
"def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convenience function that the b64str runestring is valid, derives from our secret, and passes against these values. If you want to check many runes, it's more efficient to create the MasterRune first then check them, but this is fine if you're only checking one. | def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
return MasterRune(secret).check_with_reason(b64str, values) | [
"def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse read and quality strings from a FASTQ file with sequencing reads. | def readFastq(filename):
sequences = []
qualities = []
with open(filename) as fh:
while True:
fh.readline() # skip name line
seq = fh.readline().rstrip() #read base sequence
fh.readline() # skip placeholder line
qual = fh.readline().rstrip() # base quality line
if len(seq) == 0:
break
sequences.append(seq)
qualities.append(qual)
return sequences, qualities | [
"def readFastq(filename):\n sequences = []\n qualities = []\n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() # read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a hash map between kmers and readings. | def kmerHashMap(reads, k):
kmers_dict = {}
# loop through all reads
for i in range(len(reads)):
# loop read's bases, except for the last k, to obtain its kmers
for j in range(1+len(reads[i])-k):
kmer = reads[i][j:k+j]
if kmers_dict.has_key(kmer):
kmers_dict[kmer].add(i)
else:
kmers_dict[kmer] = set([i])
return kmers_dict | [
"def create_hash_map(self):\n map = {}\n for brand in self.brands:\n for beer in brand.beers:\n key = round(beer.price, -2)\n if key in map:\n map[key].append(beer)\n else:\n map[key] = [beer]\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the squared L2 norm of the pattern. | def l2_norm(pattern):
return np.linalg.norm(pattern) | [
"def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))",
"def l2_norm(params):\n flattened, _ = weights_flatten(params)\n return np.dot(flattened, flattened)",
"def L2n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the l2 norm of a stack of patterns. | def l2_norm_batch(pattern_stack):
return np.linalg.norm(pattern_stack, axis=0) | [
"def l2_norm(pattern):\n return np.linalg.norm(pattern)",
"def l2_norm(params):\n flattened, _ = weights_flatten(params)\n return np.dot(flattened, flattened)",
"def L2norm(arr):\n return sqrt(add.reduce(arr**2))",
"def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flatte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the inner product of the two patterns as a vecter. | def inner_product(pattern_one, pattern_two):
return np.sum(np.multiply(pattern_one, pattern_two)) | [
"def inner_product(v1, v2):\r\n\r\n return v1.x * v2.x + v1.y * v2.y",
"def vector_inner_product(self, a, b):\n\t\tassert(isinstance(a, types.ListType))\n\t\tassert(isinstance(b, types.ListType))\n\t\treturn reduce(operator.add, map(operator.mul, a, b))",
"def _inner_product(self, z1, z2):\r\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the inner product pair of each pattern in batch one and batch two. Notice that the pattern_stack_one variable represent the pattern along the zero dimension while the pattern_stack_two variable represent patterns along dimension one in the final distance matrix. | def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):
"""
Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the
other half.
"""
holder = np.zeros((pattern_num_one, pattern_num_two))
for l in range(pattern_num_one):
for m in range(pattern_num_two):
holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))
return holder | [
"def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))",
"def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)",
"def _outer_product(cls, a, b):\n return np.reshape(np.outer(a, b), a.shape + b.shape)",
"def generate_pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply np.exp( matrix/two_sigma_square) elementwise. | def gaussian_dense(matrix, two_sigma_square):
return np.exp(- matrix / two_sigma_square) | [
"def expval(op, dm):\n return np.tensordot(op, dm, ([0, 1], [0, 1]))",
"def kernel_sqExp(a,b, ls=1, sv=1):\n a = a.T/ls\n b = b.T/ls\n D, n = np.shape(a)\n d, m = np.shape(b)\n sqdist = np.tile((a**2).T, [1, m]) + np.tile(b*b, [n, 1]) - 2*np.dot(a.T,b)\n my_kernel = (sv**2) * np.exp(-0.5*sqdi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Each row of the matrix, let's say the jth row, represents the distance between the other data point from the jth point. This function returns the indexes for the points with the smallest distances with respect to each point represented by that specified row. By row, I mean the 0th dimension. Also notice that this function includes the target particle, i.e. the diagonal element along the matrix is set to 1. | def nearest_points_indexes_with_self(matrix, num_to_keep):
# Set the diagonal to 1
np.fill_diagonal(matrix, 1)
# Get the position for the resulted values
sort_arg = np.argsort(matrix, axis=1)
return sort_arg[:, : num_to_keep] | [
"def _closest_points(x, y):\n P = x.shape[0]\n idx = np.zeros((P,), dtype=\"uint32\")\n for i in range(P):\n d = np.sum((y - np.expand_dims(x[i], 0))**2, axis=1) # K,\n idx[i] = np.argmin(d)\n return idx",
"def find_min_distance():\n return np.argmin(d)",
"def nearest_vertex_to(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate an identity key pair. Clients should only do this once, at install time. the generated IdentityKeyPair. | def generateIdentityKeyPair():
keyPair = Curve.generateKeyPair()
publicKey = IdentityKey(keyPair.getPublicKey())
serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \
'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \
'edfbcd82129b14a88791ac81365c'
serialized = binascii.unhexlify(serialized.encode())
identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey())
return identityKeyPair
# return IdentityKeyPair(serialized=serialized) | [
"def __generate_asymetric_key_pair(self):\n secret_key = PrivateKey.generate()\n public_key = secret_key.public_key\n return AsymmetricKeyPair(secret_key, public_key)",
"def create_keypair(self):\n self.keypair = rsa.generate_private_key(\n public_exponent=65537,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a registration ID. Clients should only do this once, at install time. | def generateRegistrationId():
regId = KeyHelper.getRandomSequence()
return regId | [
"def generate_id():\n return str(hex(int(time.time() * 10 ** 7)))[5:]",
"def get_id(self) -> str:\n return self._register_id",
"def gen_id() -> str:\n # id is set according to the current unix time\n return f'cli-reminder-{time.time()}'",
"def gen_uuid():\n return str( uuid.uuid4() )",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a list of PreKeys. Clients should do this at install time, and subsequently any time the list of PreKeys stored on the server runs low. PreKey IDs are shorts, so they will eventually be repeated. Clients should store PreKeys in a circular buffer, so that they are repeated as infrequently as possible. start The starting PreKey ID, inclusive. count The number of PreKeys to generate. the list of generated PreKeyRecords. | def generatePreKeys(start, count):
results = []
start -= 1
for i in range(0, count):
preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1
results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))
return results | [
"def _create_keys(self):\n num_clients = self.bft_network.config.num_clients\n if num_clients == 0:\n return []\n cur = bytearray(\"A\", 'utf-8')\n keys = [b\"A....................\"]\n for i in range(1, 2 * num_clients):\n end = cur[-1]\n if chr(e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether the given reader exists | def exists(reader_name: str) -> bool:
return plugins.exists(package_name=__name__, plugin_name=reader_name) | [
"def hasReader(self, reader):\n return self.readermanager.hasReader(reader)",
"def find_reader(cls, name):\n names = [r.name for r in cls.reader_list]\n \n if name in names:\n print('读者:%s 已找到!' % name)\n return 1\n else:\n print('读者:%s 找不到!' % n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get one line documentation for readers If no readers are specified, documentation for all available readers are returned. | def short_docs(*readers: str) -> List[Tuple[str, str]]:
if not readers:
readers = names()
return [(r, plugins.doc(__name__, r, long_doc=False)) for r in readers] | [
"def select_reader():\r\n readers_list = readers()\r\n\r\n if readers_list:\r\n return readers_list[0]",
"def read_documentation(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n while lin[0] != ':':\r\n self.documentation.append(lin)\r\n lin = self.read_line(fid)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read a bytes stream with a given reader If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found. | def read_stream(
input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any
) -> Reader:
if reader_name is None:
reader_name = identify(input_stream)
reader = plugins.call(
package_name=__name__,
plugin_name=reader_name,
input_stream=input_stream,
**reader_args,
)
reader.read()
return reader | [
"def getreader(encoding):\r\n return lookup(encoding).streamreader",
"def read(self, reader):\n self._verify(reader.read())\n return self",
"def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read a file with a given reader If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found. | def read_file(
file_path: Union[str, pathlib.Path],
reader_name: Optional[str] = None,
**reader_args: Any,
) -> Reader:
with open(file_path, mode="rb") as input_stream:
return read_stream(input_stream, reader_name) | [
"def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test pointwise arithmetic with stencil offsets across two functions in indexed expression format | def test_indexed_stencil(self, expr, result):
j, l = dimify('j l')
a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base
fa = a.function
b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base
fb = b.function
eqn = eval(expr)
Operator(eqn)(fa, fb)
assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12) | [
"def test_unsubstituted_indexeds():\n grid = Grid(shape=(8, 8, 8))\n\n f = Function(name='f', grid=grid)\n p = TimeFunction(name='p', grid=grid)\n p1 = TimeFunction(name='p', grid=grid)\n\n f.data[:] = 0.12\n p.data[:] = 1.\n p1.data[:] = 1.\n\n eq = Eq(p.forward, sin(f)*p*f)\n\n op0 = Op... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test pointwise arithmetic with stencil offsets across a single functions with buffering dimension in indexed expression format | def test_indexed_buffered(self, expr, result):
i, j, l = dimify('i j l')
a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base
fa = a.function
eqn = eval(expr)
Operator(eqn)(fa)
assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12) | [
"def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test pointwise arithmetic with stencil offsets and open loop boundaries in indexed expression format | def test_indexed_open_loops(self, expr, result):
i, j, l = dimify('i j l')
pushed = [d.size for d in [j, l]]
j.size = None
l.size = None
a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed
fa = a.function
fa.data[0, :, :] = 2.
eqn = eval(expr)
Operator(eqn)(fa)
assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
j.size, l.size = pushed | [
"def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test calltime symbols overrides with other symbols | def test_override_symbol(self):
i, j, k, l = dimify('i j k l')
a = symbol(name='a', dimensions=(i, j, k, l), value=2.)
a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.)
a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.)
op = Operator(Eq(a, a + 3))
op()
op(a=a1)
op(a=a2)
shape = [d.size for d in [i, j, k, l]]
assert(np.allclose(a.data, np.zeros(shape) + 5))
assert(np.allclose(a1.data, np.zeros(shape) + 6))
assert(np.allclose(a2.data, np.zeros(shape) + 7)) | [
"def _unrecognized_symbol_func(**kwargs):",
"def FakeSymbol(*args, _op, **kwargs):\n return symbol.Custom(*args, _op=_op, op_type=\"_fake\", **kwargs)",
"def test_symbol(self, data, symbol_first, symbol_second):\n layer = Points(data)\n assert layer.symbol == \"disc\"\n\n layer.symbol = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test calltime symbols overrides with numpy arrays | def test_override_array(self):
i, j, k, l = dimify('i j k l')
shape = tuple(d.size for d in (i, j, k, l))
a = symbol(name='a', dimensions=(i, j, k, l), value=2.)
a1 = np.zeros(shape=shape, dtype=np.float32) + 3.
a2 = np.zeros(shape=shape, dtype=np.float32) + 4.
op = Operator(Eq(a, a + 3))
op()
op(a=a1)
op(a=a2)
shape = [d.size for d in [i, j, k, l]]
assert(np.allclose(a.data, np.zeros(shape) + 5))
assert(np.allclose(a1, np.zeros(shape) + 6))
assert(np.allclose(a2, np.zeros(shape) + 7)) | [
"def test__array__(parameter):\n par, _ = parameter\n assert np.array(par) == np.array(10.0)",
"def test_TimeArray_repr():",
"def test_format_signature_numpy():",
"def test_TimeArray_convert_unit():",
"def test_ndarray(self):\n self.check_roundtrip_ndarrays(np.zeros((5,3)))\n self.check_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the dimension sizes are being inferred correctly | def test_dimension_size_infer(self, nt=100):
i, j, k = dimify('i j k')
shape = tuple([d.size for d in [i, j, k]])
a = DenseData(name='a', shape=shape).indexed
b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed
eqn = Eq(b[time, x, y, z], a[x, y, z])
op = Operator(eqn)
_, op_dim_sizes = op.arguments()
assert(op_dim_sizes[time.name] == nt) | [
"def check_sizes(self):\n assert self.get_queries().shape == (self.nq, self.d)\n if self.nt > 0:\n xt = self.get_train(maxtrain=123)\n assert xt.shape == (123, self.d), \"shape=%s\" % (xt.shape, )\n assert self.get_database().shape == (self.nb, self.d)\n assert self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Box with photon shooting. Particularly the flux of the final image. | def test_box_shoot():
rng = galsim.BaseDeviate(1234)
obj = galsim.Box(width=1.3, height=2.4, flux=1.e4)
im = galsim.Image(100,100, scale=1)
im.setCenter(0,0)
added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
print('obj.flux = ',obj.flux)
print('added_flux = ',added_flux)
print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())
print('image flux = ',im.array.sum())
assert np.isclose(added_flux, obj.flux)
assert np.isclose(im.array.sum(), obj.flux)
photons2 = obj.makePhot(poisson_flux=False, rng=rng)
assert photons2 == photons, "Box makePhot not equivalent to drawPhot"
obj = galsim.Pixel(scale=9.3, flux=1.e4)
added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
print('obj.flux = ',obj.flux)
print('added_flux = ',added_flux)
print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())
print('image flux = ',im.array.sum())
assert np.isclose(added_flux, obj.flux)
assert np.isclose(im.array.sum(), obj.flux)
photons2 = obj.makePhot(poisson_flux=False, rng=rng)
assert photons2 == photons, "Pixel makePhot not equivalent to drawPhot"
obj = galsim.TopHat(radius=4.7, flux=1.e4)
added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
print('obj.flux = ',obj.flux)
print('added_flux = ',added_flux)
print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())
print('image flux = ',im.array.sum())
assert np.isclose(added_flux, obj.flux)
assert np.isclose(im.array.sum(), obj.flux)
photons2 = obj.makePhot(poisson_flux=False, rng=rng)
assert photons2 == photons, "TopHat makePhot not equivalent to drawPhot" | [
"def shooting(self):\n pass",
"def test_photon_flux(self):\n self.assertIsInstance(self.el.photon_flux(), units.Quantity)",
"def shoot(self):\n self.assertIsInstance(gun(3).shoot(), 2)\n self.assertIsInstance(gun(10).shoot(), 9)",
"def test_photon_flux(self):\n self.assertEq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decide whether to enter hotspot mode or wifi mode and then do so | def set_wifi_mode(args):
pass
"""+
try:
if args['mode'] == 'hotspot':
logger.info('will enter hotspot mode')
#TODO - Need to capture the line that contains interface [some lan id] and uncomment it.
change_file_line(path.join('/etc', 'dhcpcd.conf'),
interface_l1_res, 'interface {}\n'.format()
return True if args['silent'] else 'Ok'
if args['mode'] == 'wi-fi':
logger.info('will enter wi-fi mode')
return True if args['silent'] else 'Ok'
else:
logger.error('Unknown wi-fi mode: {}'.format(args['mode']))
return False if args['silent'] else 'ERROR'
except:
logger.error('Exception in set_wifi_mode: {}, {}'.format(exc_info()[0], exc_info()[1]))
return False if args['silent'] else 'ERROR'
""" | [
"def wifi(self):\n if not hasattr(self, \"_wifi\"):\n cmd = [\"networksetup\", \"-getairportpower\", \"en0\"]\n output = subprocess.check_output(cmd)\n self._wifi = \"On\" in str(output)\n return self._wifi",
"def test_usb_tethering_coexist_wifi_hotspot(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that returns true if a string contains a number | def hasNumbers(inputString):
return any(char.isdigit() for char in inputString) | [
"def hasnumber(self, s):\n return bool(re.search(r'\\d', s))",
"def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))",
"def hasReNumbers(inputString):\n return bool(re.search(r'\\d', inputString))",
"def _is_number(string: str) -> bool:\n try:\n int(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that saves the return_list from make_time to a file called yt_vids.txt Optional, default False | def save_link_time(return_list, path_to_download):
# Opens a new file and writes lines to it and saves it at the spot provided
with open(os.path.join(path_to_download, "yt_vids.txt"), "w") as w:
w.write('\n'.join('{} {} {}'.format(
x[0], x[1][0], x[1][1]) for x in return_list)) | [
"def write_list_to_file(my_list):\r\n with open(str(filename.strftime(\"%Y-%m-%d-%H-%M-%S-%f\")) + \".txt\", \"w\") as file:\r\n for line in my_list:\r\n file.write(str(line)+ \"\\n\") #Writing line by line\r",
"def export_times(self, filename=None):\r\n if filename is None:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that downloads a whole video when no interval is supplied Downloaded to the same place where yt_vids is saved to (from save_link_time func) | def download_whole(no_interval):
print(os.getcwd())
SAVE_PATH = 'tmp'
ydl_opts = {"nocheckcertificate": True, "noplaylist": True,
'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
for video in range(len(no_interval)):
try:
ydl.download([no_interval[video]])
except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError:
print(f"Couldn't download {no_interval[video]}")
continue | [
"def download_video(self):\n track = self.f_name + self.file_type\n # youtube_cmd = [\n # \"youtube-dl\", self.link, \"-f\",\n # self.file_type, \"-o\", track\n # ]\n\n youtube_cmd = [\n \"youtube-dl\", self.link, \"-o\", track, \"-f\", \"webm\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to download videos in specified intervals Takes a list (interval_list) and a path as inputs | def download_interval(interval_list):
start = ['start', 'begin', 'beginning', 'head', 'first']
end = ['slut', 'end', 'tail', 'finish',
'finito', 'fin', 'done', 'finished']
# Iterate over the list
for link in range(len(interval_list)):
try:
video = pafy.new(interval_list[link][0], ydl_opts={
'nocheckcertificate': True, "noplaylist": True})
# Only downloads the video if the video hasn't been downloaded before
if not os.path.exists(os.path.join("tmp", f"{video.title}.mp4")):
video_s = video.getbestvideo()
# TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list
video_a = video.getbestaudio()
# Checks if the end point is a string
if interval_list[link][1][1].lower() in end:
# Where is the stream, where should we start, how long should it run
mp4_vid = ffmpeg.input(
video_s.url, ss=interval_list[link][1][0], t=video.duration)
mp4_aud = ffmpeg.input(
video_a.url, ss=interval_list[link][1][0], t=video.duration)
else:
# Where is the stream, where should we start, how long should it run
mp4_vid = ffmpeg.input(
video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])
mp4_aud = ffmpeg.input(
video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])
# Do the processing
try:
(
ffmpeg
.concat(
# Specify what you want from the streams (v for video and a for audio)
mp4_vid['v'],
mp4_aud['a'],
# One video stream and one audio stream
v=1,
a=1
)
# Output is title of video with mp4 ending
.output(os.path.join("tmp", f'{video.title}.mp4'))
.run()
)
except TypeError as e:
print(f"An error occurred e 0: {e}")
except ffmpeg._run.Error as e:
print(f"An error occurred e 1: {e}")
except Exception as e:
print(f"I couldn't download {interval_list[link]} due to: {e}") | [
"def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to download pictures from the input sequence | def download_pics(pics_links):
for link in range(len(pics_links)):
r = requests.get(pics_links[link][0])
with open(os.path.join("tmp", f"{link}.jpg"), "wb") as dl:
dl.write(r.content) | [
"def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get chain attribute for an object. | def chain_getattr(obj, attr, value=None):
try:
return _resolve_value(safe_chain_getattr(obj, attr))
except AttributeError:
return value | [
"def get_deep_attr(obj, value):\n subelts = value.split('.', 1)\n if len(subelts) == 1:\n return getattr(obj, value)\n else:\n return get_deep_attr(getattr(obj, subelts[0]), subelts[1])",
"def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
trim the list to make total length no more than limit.If split specified,a string is return. | def trim_iterable(iterable, limit, *, split=None, prefix='', postfix=''):
if split is None:
sl = 0
join = False
else:
sl = len(split)
join = True
result = []
rl = 0
for element in iterable:
element = prefix + element + postfix
el = len(element)
if len(result) > 0:
el += sl
rl += el
if rl <= limit:
result.append(element)
else:
break
if join:
result = split.join(result)
return result | [
"def Trim(lst: List[T], limit: int) -> List[T]:\n limit = max(0, limit)\n\n clipping = lst[limit:]\n del lst[limit:]\n return clipping",
"def allow_max_length(self, input_list, max_len=150):\n if (len(input_list) > max_len):\n return input_list[:max_len]\n return input_list",
"def s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It raises an error when trying to decrypt a nonencrypted value. | def test_decrypt_format(self):
with pytest.raises(EncryptionError):
decrypt('message') | [
"def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'",
"def decrypt(ciphertext):\n return ciphertext",
"def test_decrypt_key(self):\n key = b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It accepts a custom decryption key. | def test_decrypt_key(self):
key = b'0' * 32
encrypted = encrypt('message', key=key)
assert decrypt(encrypted, key=key) == 'message' | [
"def decrypt_key (key, tenant_id):\n try:\n key = RSA.importKey(key,tenant_id)\n unencrypted_key = key.exportKey('PEM')\n if isinstance(unencrypted_key, ValueError):\n raise NfvoException(\"Unable to decrypt the private key: {}\".format(unencrypted_key), httperrors.Internal_Server... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It reencrypts an encrypted message using a new key. | def test_rekey(self):
old_key = b'0' * 32
new_key = b'1' * 32
old_encrypted = encrypt('message', key=old_key)
new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)
assert decrypt(new_encrypted, key=new_key) == 'message' | [
"def rekey(self,newkeyid, oldkeyid):\n self.blob = _encrypt(self._decrypt(self.blob, self.key), keyid)\n self.keyid = keyid",
"def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It raises an error when trying to rekey a nonencrypted value. | def test_rekey_non_encrypted(self):
with pytest.raises(EncryptionError):
rekey('message', old_key=b'0' * 32, new_key=b'1' * 32) | [
"def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)",
"def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It raises an error when given an invalid new key. | def test_rekey_key_format(self):
old_key = b'0' * 32
encrypted = encrypt('message', key=old_key)
with pytest.raises(EncryptionError):
rekey(encrypted, old_key=old_key, new_key=b'1' * 31) | [
"def is_invalid(self, key): # pragma: no cover\n\t\traise NotImplementedError",
"def _newKey(self, key):\n pass",
"def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)",
"def keyIssue(issue,key,keySet):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if domain is marked sensitive | def is_domain_sensitive(name):
query = database.session_query(Domain)
query = query.filter(and_(Domain.sensitive, Domain.name == name))
return database.find_all(query, Domain, {}).all() | [
"def sensitive(self) -> Optional[bool]:\n return pulumi.get(self, \"sensitive\")",
"def is_sensitive(self):\n return self._is_sensitive",
"def is_domain_explicit(self):\n\n return len(self.domain) != 2",
"def is_sensitive(self) -> Optional[bool]:\n return pulumi.get(self, \"is_sens... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update an existing domain | def update(domain_id, name, sensitive):
domain = get(domain_id)
domain.name = name
domain.sensitive = sensitive
database.update(domain) | [
"def domain_update(self, domain):\n\t\treturn protocol.Request_DOMAIN_UPDATE(domain=domain)",
"def update_domain_entry(domainName=None, domainEntry=None):\n pass",
"def test_update_domain(self):\n pass",
"def edit_domain(domain_name):\n\n if request.method == \"POST\":\n domain = session.q... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Establish a TCP connection to the indiserver via port 7624 | def connect_to_indi():
indiclient=IndiClient()
indiclient.setServer("localhost",7624)
# Ensure the indiserver is running
if (not(indiclient.connectServer())):
print("No indiserver running on "+indiclient.getHost()+":"+str(indiclient.getPort())+" - Try to run")
print(" indiserver indi_sx_ccd")
sys.exit(1)
return indiclient | [
"def connect(self):\n self._ctrl_socket.connect(self._host, 8888)",
"def open(self):\n try:\n srvaddr = (socket.TIPC_ADDR_NAME,\n self.port,\n 5,\n 0)\n self.handle = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)\n except Exception... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connection routine for the CCD (given below in ccd variable). The following CCD properties are accessed. More can be found by going to indilib.org. CONNECTION Switch CCD_EXPOSURE Number CCD1 BLOB CCD_BINNING Number CCD_ABORT_EXPOSURE Number CCD_TEMPERATURE Number CCD_COOLER Switch CCD_FRAME_TYPE Switch | def connect_to_ccd():
ccd="SX CCD SXVR-H694"
device_ccd=indiclient.getDevice(ccd)
while not(device_ccd):
time.sleep(0.5)
device_ccd=indiclient.getDevice(ccd)
print("Searching for device...")
print("Found device")
ccd_connect=device_ccd.getSwitch("CONNECTION")
while not(ccd_connect):
time.sleep(0.5)
ccd_connect=device_ccd.getSwitch("CONNECTION")
if not(device_ccd.isConnected()):
ccd_connect[0].s=PyIndi.ISS_ON # the "CONNECT" switch
ccd_connect[1].s=PyIndi.ISS_OFF # the "DISCONNECT" switch
indiclient.sendNewSwitch(ccd_connect)
ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE")
while not(ccd_exposure):
time.sleep(0.5)
ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE")
# inform the indi server that we want to receive the
# "CCD1" blob from this device
indiclient.setBLOBMode(PyIndi.B_ALSO, ccd, "CCD1")
ccd_ccd1=device_ccd.getBLOB("CCD1")
while not(ccd_ccd1):
time.sleep(0.5)
ccd_ccd1=device_ccd.getBLOB("CCD1")
# get access to setting the CCD's binning value
ccd_bin=device_ccd.getNumber("CCD_BINNING")
while not(ccd_bin):
time.sleep(0.5)
ccd_bin=device_ccd.getNumber("CCD_BINNING")
# get access to aborting the CCD's exposure
ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE")
while not(ccd_abort):
time.sleep(0.5)
ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE")
# get access to the CCD's temperature value
ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE")
while not(ccd_temp):
time.sleep(0.5)
ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE")
# get access to switching the CCD's cooler on/off
ccd_cooler=device_ccd.getSwitch("CCD_COOLER")
while not(ccd_cooler):
time.sleep(0.5)
ccd_cooler=device_ccd.getSwitch("CCD_COOLER")
# get access to switching the CCD's image frame type
ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE")
while not(ccd_frame):
time.sleep(0.5)
ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE")
return ccd_exposure, ccd_ccd1, ccd_bin, ccd_abort, ccd_temp, ccd_cooler, ccd_frame | [
"def ccd(self):\n self.spectrum = self.spectrum",
"def get_ccd_info(self, handle):\n # 'CCD_INFO_IMAGING' will get firmware version, and a list of readout modes (binning)\n # with corresponding image widths, heights, gains and also physical pixel width, height.\n ccd_info_params0 = Get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the last numbered image in the current directory. | def last_image(fileDir):
lastNum = 0
lastImg = ''
# find the name and number of the last image in the current directory
for f in os.listdir(fileDir):
if os.path.isfile(os.path.join(fileDir, f)):
file_name = os.path.splitext(f)[0]
file_name2 = file_name[4:]
try:
file_num = int(file_name2)
if file_num > lastNum:
lastNum = file_num
lastImg = os.path.join(fileDir, f)
except ValueError:
'The file name "%s" is not an integer. Skipping' % file_name
return lastNum, lastImg | [
"def get_last_counter():\n counter = imageNumStart\n if imageNumOn:\n image_ext = \".jpg\"\n search_str = imagePath + \"/*\" + image_ext\n file_prefix_len = len(imagePath + imageNamePrefix)+1\n try:\n # Scan image folder for most recent jpg file\n # and try to e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sends an exposure command to the CCD given the type of frame and exposure time. The received BLOB is of FITS type and is | def exposure(frameType, expTime):
blobEvent.clear()
# set the specified frame type
if frameType.lower() == 'light':
ccd_frame[0].s = PyIndi.ISS_ON
ccd_frame[1].s = PyIndi.ISS_OFF
ccd_frame[2].s = PyIndi.ISS_OFF
ccd_frame[3].s = PyIndi.ISS_OFF
indiclient.sendNewSwitch(ccd_frame)
elif frameType.lower() == 'bias':
ccd_frame[0].s = PyIndi.ISS_OFF
ccd_frame[1].s = PyIndi.ISS_ON
ccd_frame[2].s = PyIndi.ISS_OFF
ccd_frame[3].s = PyIndi.ISS_OFF
indiclient.sendNewSwitch(ccd_frame)
elif frameType.lower() == 'dark':
ccd_frame[0].s = PyIndi.ISS_OFF
ccd_frame[1].s = PyIndi.ISS_OFF
ccd_frame[2].s = PyIndi.ISS_ON
ccd_frame[3].s = PyIndi.ISS_OFF
indiclient.sendNewSwitch(ccd_frame)
elif frameType.lower() == 'flat':
ccd_frame[0].s = PyIndi.ISS_OFF
ccd_frame[1].s = PyIndi.ISS_OFF
ccd_frame[2].s = PyIndi.ISS_OFF
ccd_frame[3].s = PyIndi.ISS_ON
indiclient.sendNewSwitch(ccd_frame)
# set the value for the next exposure
ccd_exposure[0].value=expTime
indiclient.sendNewNumber(ccd_exposure)
# wait for the exposure
blobEvent.wait()
for blob in ccd_ccd1:
# pyindi-client adds a getblobdata() method to IBLOB item
# for accessing the contents of the blob, which is a bytearray in Python
image_data=blob.getblobdata()
# write the byte array out to a FITS file
global imgNum
global imgName
imgNum += 1
fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'
f = open(fileName, 'wb')
f.write(image_data)
f.close()
imgName = fileName
return fileName | [
"def expose(self, cmd, expTime, expType, filename):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n # The expTime unit is ms.\n time.sleep((expTime / 1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the method that receives the client's data and decides what to do with it. It runs in a loop to always be accepting new connections. If the data is 'status', the CCD status is returned. If the data is 'stop', the current exposure is stopped. If the data is anything else, a new thread is created and the data is sent to handle_command(). | async def handle_client(reader, writer):
    """Per-connection asyncio stream handler for the CCD control server.

    Protocol (newline-terminated text):
      * 'status' -> report busy/idle state, bin mode, CCD temp, last frame
        type, file dir and last image name, then 'DONE'.
      * 'stop'   -> abort a running exposure via the INDI abort switch.
      * anything else -> dispatched to handle_command() on a worker thread.

    Relies on module-level globals set up elsewhere in the file:
    indiclient, ccd_frame, ccd_bin, ccd_temp, ccd_abort, blobEvent,
    fileDir, imgName, log -- TODO confirm all are initialized before
    the server accepts connections.
    """
    # Last text received; the loop ends when the client sends 'quit'.
    request = None
    # loop to continually handle incoming data
    while request != 'quit':
        request = (await reader.read(255)).decode('utf8')
        print(request.encode('utf8'))
        #log.info('COMMAND = '+request)
        # Echo the upper-cased command back to the client first.
        writer.write(('COMMAND = '+request.upper()+'\n').encode('utf8'))
        response = 'BAD'
        # check if data is empty, a status query, or potential command
        dataDec = request
        if dataDec == '':
            # Empty read: the client closed the connection -- stop serving it.
            break
        elif 'status' in dataDec.lower():
            response = 'OK'
            # check if the command thread is running
            # NOTE(review): exposureState() may not exist / comThread may not
            # have been created on the first query; the bare except treats any
            # failure here as IDLE -- confirm that is intended.
            try:
                if exposureState() > 0:
                    response = response + '\nBUSY'
                else:
                    response = response + '\nIDLE'
            except:
                response = response + '\nIDLE'
            # Translate the INDI frame-type switch vector into a readable name.
            # NOTE(review): frameType is unbound if no switch is ON -- the
            # response concatenation below would then raise; presumably the
            # INDI driver guarantees exactly one switch is always ON.
            if ccd_frame[0].s == PyIndi.ISS_ON:
                frameType = 'LIGHT'
            elif ccd_frame[1].s == PyIndi.ISS_ON:
                frameType = 'BIAS'
            elif ccd_frame[2].s == PyIndi.ISS_ON:
                frameType = 'DARK'
            elif ccd_frame[3].s == PyIndi.ISS_ON:
                frameType = 'FLAT'
            response = response+\
                '\nBIN MODE = '+str(ccd_bin[0].value)+'x'+str(ccd_bin[1].value)+\
                '\nCCD TEMP = '+str(ccd_temp[0].value)+\
                'C\nLAST FRAME TYPE = '+str(frameType)+\
                '\nFILE DIR = '+str(fileDir)+\
                '\nLAST IMAGE = '+str(imgName)
            # send current status to open connection & log it
            #log.info('RESPONSE: '+response)
            writer.write((response+'\nDONE\n').encode('utf-8'))
        elif 'stop' in dataDec.lower():
            # check if the command thread is running
            try:
                if comThread.is_alive():
                    response = 'OK: aborting exposure'
                    ccd_abort[0].s=PyIndi.ISS_ON
                    indiclient.sendNewSwitch(ccd_abort)
                    blobEvent.set() #Ends the currently running thread.
                    response = response+'\nExposure Aborted'
                else:
                    response = 'OK: idle'
            except:
                # comThread has not been created yet -> nothing to abort.
                response = 'OK: idle'
            # send current status to open connection & log it
            #log.info('RESPONSE = '+response)
            writer.write((response+'\nDONE\n').encode('utf-8'))
        else:
            # check if the command thread is running, may fail if not created yet, hence try/except
            # NOTE(review): assigning comThread below makes it function-local,
            # so is_alive() raises UnboundLocalError on the first command of a
            # connection; the except branch then creates the thread. Confirm
            # this is the intended single-command-at-a-time mechanism.
            try:
                if comThread.is_alive():
                    response = 'BAD: busy'
                    # send current status to open connection & log it
                    #log.info('RESPONSE = '+response)
                    writer.write((response+'\nDONE\n').encode('utf-8'))
                else:
                    # create a new thread for the command
                    comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,))
                    comThread.start()
            except:
                # create a new thread for the command
                comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,))
                comThread.start()
        # Flush everything written for this request before reading the next.
        await writer.drain()
    writer.close()
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n self.recieved_string = \"\"\n\n print \"Client connected at \" + self.ip + \":\" + str(self.port)\n\n # Loop that listens for messages from the clie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get list of names of accessible repositories (including owner) | def list_repositories(self):
data = self._get_all_data('/user/repos')
return [repo['full_name'] for repo in data] | [
"def repositories():\n return user.repos()",
"def listRepositories(self):\n return self.mini_catalog.listRepositories()",
"def list_repositories(self):\n return list(self.repositories.values())",
"def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the followin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get dict of labels with colors for given repository slug | def list_labels(self, repository):
data = self._get_all_data('/repos/{}/labels'.format(repository))
return {l['name']: str(l['color']) for l in data} | [
"def get_colors() -> Dict[str, cq.Color]:\n colors = {}\n for name in dir(Quantity):\n splitted = name.rsplit(SEP, 1)\n if splitted[0] == OCP_COLOR_LEADER:\n colors.update({splitted[1].lower(): cq.Color(splitted[1])})\n\n return colors",
"def get_label_color(status):\n\n color... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create new label in given repository | def create_label(self, repository, name, color, **kwargs):
data = {'name': name, 'color': color}
response = self.session.post(
'{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository),
json=data
)
if response.status_code != 201:
raise GitHubError(response) | [
"def test_issue_create_label(self):\n pass",
"def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update existing label in given repository | def update_label(self, repository, name, color, old_name=None, **kwargs):
data = {'name': name, 'color': color}
response = self.session.patch(
'{}/repos/{}/labels/{}'.format(
self.GH_API_ENDPOINT, repository, old_name or name
),
json=data
)
if response.status_code != 200:
raise GitHubError(response) | [
"def update(self):\n args = {attr: getattr(self, attr) for attr in self.to_update}\n args[\"id\"] = self.id\n _perform_command(self.owner, \"label_update\", args)",
"def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete existing label in given repository | def delete_label(self, repository, name, **kwargs):
response = self.session.delete(
'{}/repos/{}/labels/{}'.format(
self.GH_API_ENDPOINT, repository, name
)
)
if response.status_code != 204:
raise GitHubError(response) | [
"def delete_label(self, label):\n return self.label(label, action='DELETE')",
"def test_issue_delete_label(self):\n pass",
"def delete(self):\n args = {\"id\": self.id}\n _perform_command(self.owner, \"label_delete\", args)",
"def repository_delete(ctx: click.Context, repository_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts feature vectors from a given model and dataset and writes them, along with labels, to a file. This function works for any model whose forward() method returns, on any given input x, the pair (prediction on x, feature vector for x) and more generally, any model whose second return value is a feature vector. | def extract_feature_vectors(model, data_loader, parameters, features_file_path):
feature_vectors, label_vectors = [], []
# Set model to evaluation mode
model.eval()
# Show progress bar while iterating over mini-batches
with tqdm(total=len(data_loader)) as progress_bar:
for i, (X_batch, Y_batch) in enumerate(data_loader):
# Dimensions of the input Tensor
batch_size, channels, height, width = X_batch.size()
# If GPU available, enable CUDA on data
if parameters.cuda:
X_batch = X_batch.cuda()
Y_batch = Y_batch.cuda()
# Wrap the input tensor in a Torch Variable
X_batch_variable = Variable(X_batch, volatile=True)
# Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features
Y_predicted, features = model(X_batch_variable)
# Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to
# CPU, and convert it to a NumPy array
features_numpy = features.data.cpu().numpy()
# Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array
Y_numpy = Y_batch.cpu().numpy()
# For each example in the batch, record its features and labels
for j in range(batch_size):
feature_vectors.append(features_numpy[j,:])
label_vectors.append(Y_numpy[j,:])
progress_bar.update()
utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors) | [
"def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the average distance between pairs of vectors in a given list of vectors. | def average_distance_between_vectors(vectors, distance):
vectors = numpy.array(vectors)
vectors = vectors - numpy.mean(vectors, axis=0)
vectors = normalize(vectors)
vectors = list(vectors)
average_distance = utils.RunningAverage()
for vector_1, vector_2 in itertools.combinations(vectors, r=2): # All pairs of vectors
average_distance.update(distance(vector_1, vector_2))
return average_distance() | [
"def v_avg(vecs):\n xs = []\n ys = []\n\n for v in vecs:\n xs.append(v[0])\n ys.append(v[1])\n\n xresult = 0\n for x in xs:\n xresult += x\n\n yresult = 0\n for y in ys:\n yresult += y\n\n return xresult / len(xs), yresult / len(ys)",
"def compute_average(vec_li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |