| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
args = {
'level': level,
}
self._level_chk.check(args)
return self._client.json('logger.set_level', args)
|
def set_level(self, level)
|
Set the log level of the g8os
Note: this level applies to messages that end up on screen or in the log file
:param level: the level to be set; can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG")
| 8.674695
| 10.591051
| 0.819059
|
args = {
'queue': queue,
'levels': list(levels),
}
self._subscribe_chk.check(args)
return self._client.json('logger.subscribe', args)
|
def subscribe(self, queue=None, *levels)
|
Subscribe to the aggregated log stream. On subscribe, a ledis queue will be fed with the logs of all running
processes. Always use the queue name returned by this method, even if you specified a queue name to use.
Note: it is legal to subscribe to the same queue, but it would be bad logic for two processes to try to
read from the same queue.
:param queue: your unique queue name; otherwise one will be generated for you
:param levels:
:return: queue name to pull from
| 7.113045
| 9.76582
| 0.728361
|
args = {
'port': port,
'interface': interface,
'subnet': subnet,
}
self._port_chk.check(args)
return self._client.json('nft.drop_port', args)
|
def drop_port(self, port, interface=None, subnet=None)
|
close an opened port (takes the same parameters passed in open)
:param port: the port number
:param interface: an optional interface to close the port for
:param subnet: an optional subnet to close the port for
| 5.920388
| 6.63178
| 0.89273
|
args = {
'key': key,
'tags': tags,
}
self._query_chk.check(args)
return self._client.json('aggregator.query', args)
|
def query(self, key=None, **tags)
|
Query zero-os aggregator for current state object of monitored metrics.
Note: ID is returned as part of the key (if set) to avoid conflicts between similar metrics that
have the same key. For example, a cpu core number can be the id associated with 'machine.CPU.percent',
so we can return the values for all core numbers in the same dict.
You can filter on the ID as a tag
:example:
self.query(key=key, id=value)
:param key: metric key (ex: machine.memory.ram.available)
:param tags: optional tags filter
:return: dict of {
'key[/id]': state object
}
| 7.575111
| 7.353566
| 1.030128
|
disks = [] if disks is None else disks
args = {
"host": host,
"port": port,
"disks": disks
}
self._rtinfo_start_params_chk.check(args)
return self._client.json("rtinfo.start", args)
|
def start(self, host="localhost", port=8999, disks=None)
|
Start rtinfo-client
:param host: str rtinfod host address
:param port: int rtinfod client port
:param disks: list of prefixes of watchable disks (e.g. ["sd"])
| 6.263923
| 5.581552
| 1.122255
|
args = {
"host": host,
"port": port,
}
self._rtinfo_stop_params_chk.check(args)
return self._client.json("rtinfo.stop", args)
|
def stop(self, host, port)
|
Stop rtinfo-client
:param host: str rtinfod host address
:param port: int rtinfod client port
| 8.25046
| 6.495664
| 1.270149
|
args = {
'subsystem': subsystem,
'name': name,
}
self._cgroup_chk.check(args)
return self._client.json('cgroup.ensure', args)
|
def ensure(self, subsystem, name)
|
Creates a cgroup if it doesn't exist under the specified subsystem
and the given name
:param subsystem: the cgroup subsystem (currently support 'memory', and 'cpuset')
:param name: name of the cgroup to create
| 7.731126
| 8.865102
| 0.872085
|
args = {
'subsystem': subsystem,
'name': name,
'pid': pid,
}
self._task_chk.check(args)
return self._client.json('cgroup.task-add', args)
|
def task_add(self, subsystem, name, pid)
|
Add process (with pid) to a cgroup
:param subsystem: the cgroup subsystem (currently support 'memory', and 'cpuset')
:param name: name of the cgroup
:param pid: PID to add
| 5.590379
| 5.386932
| 1.037767
|
args = {
'name': name,
'mem': mem,
'swap': swap,
}
self._memory_spec.check(args)
return self._client.json('cgroup.memory.spec', args)
|
def memory(self, name, mem=0, swap=0)
|
Set/Get memory cgroup specification/limitation
The call to this method will always GET the currently set values for both mem and swap.
If mem is not zero, the call will set the memory limit to the given value, and the swap limit to the given value (even 0)
:param mem: Set memory limit to the given value (in bytes), ignore if 0
:param swap: Set swap limit to the given value (in bytes) (only if mem is not zero)
:return: current memory limitation
| 5.280621
| 5.562266
| 0.949365
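A hypothetical usage sketch (the `client` handle and the cgroup name are illustrative assumptions, not from the source): cap a cgroup at 512 MiB and read back the resulting limits.
# 'client' is assumed to be a connected client exposing the cgroup manager above
limits = client.cgroup.memory('myapp', mem=512 * 1024**2, swap=0)
print(limits)  # the node returns the current memory limitation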
|
args = {
'name': name,
'cpus': cpus,
'mems': mems,
}
self._cpuset_spec.check(args)
return self._client.json('cgroup.cpuset.spec', args)
|
def cpuset(self, name, cpus=None, mems=None)
|
Set/Get cpuset cgroup specification/limitation
The call to this method will always GET the currently set values for both cpus and mems.
If cpus or mems is not None, it will be set as the spec for that attribute
:param cpus: Set cpus affinity limit to the given value (0, 1, 0-10, etc...)
:param mems: Set mems affinity limit to the given value (0, 1, 0-10, etc...)
:return: current cpuset
| 4.600903
| 5.107667
| 0.900783
|
if not id:
id = str(uuid.uuid4())
payload = {
'id': id,
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
'stream': stream,
'tags': tags,
}
self._raw_chk.check(payload)
flag = 'result:{}:flag'.format(id)
self._redis.rpush('core:default', json.dumps(payload))
if self._redis.brpoplpush(flag, flag, DefaultTimeout) is None:
raise TimeoutError('failed to queue job {}'.format(id))
logger.debug('%s >> g8core.%s(%s)', id, command, ', '.join(("%s=%s" % (k, v) for k, v in arguments.items())))
return Response(self, id)
|
def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None)
|
Implements the low level command call, this needs to build the command structure
and push it on the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments, depending on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so
client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object
| 4.171219
| 4.103578
| 1.016484
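A hedged usage sketch (`cl` and the payload are assumptions for illustration; `core.system` is one of the built-in commands the docstring mentions):
resp = cl.raw('core.system', {'name': 'ls', 'args': ['-la']}, stream=True)
# resp is the Response object for this job id; stream=True also feeds stream:<id>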
|
zeta = 0.0
if phi1 == phi2 and theta1 == theta2:
zeta = 0.0
else:
argument = sin(theta1)*sin(theta2)*cos(phi1-phi2) + \
cos(theta1)*cos(theta2)
if argument < -1:
zeta = np.pi
elif argument > 1:
zeta = 0.0
else:
zeta = acos(argument)
return zeta
|
def calczeta(phi1, phi2, theta1, theta2)
|
Calculate the angular separation between position (phi1, theta1) and
(phi2, theta2)
| 2.346375
| 2.331843
| 1.006232
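A minimal worked example, assuming calczeta and its math imports are in scope: two points on the equator a quarter turn apart in phi are separated by zeta = pi/2.
from math import pi
print(calczeta(0.0, pi/2, pi/2, pi/2))  # ~1.5707963, i.e. pi/2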
|
if m >= k:
factor = sqrt(factorial(l-k)*factorial(l+m)/factorial(l+k)/factorial(l-m))
part2 = (cos(theta1/2))**(2*l+k-m)*(-sin(theta1/2))**(m-k)/factorial(m-k)
part3 = sp.hyp2f1(m-l,-k-l,m-k+1,-(tan(theta1/2))**2)
return factor*part2*part3
else:
return (-1)**(m-k) * dlmk(l,k,m,theta1)
|
def dlmk(l,m,k,theta1)
|
returns value of d^l_mk as defined in Allen & Ottewill 1997.
Called by Dlmk
| 4.027442
| 4.224451
| 0.953365
|
return exp(complex(0.,-m*phi1)) * dlmk(l,m,k,theta1) * \
exp(complex(0.,-k*gamma(phi1,phi2,theta1,theta2)))
|
def Dlmk(l,m,k,phi1,phi2,theta1,theta2)
|
returns value of D^l_mk as defined in Allen & Ottewill 1997.
| 5.402311
| 5.777548
| 0.935053
|
if phi1 == phi2 and theta1 == theta2:
gamma = 0
else:
gamma = atan( sin(theta2)*sin(phi2-phi1) / \
(cos(theta1)*sin(theta2)*cos(phi1-phi2) - \
sin(theta1)*cos(theta2)) )
dummy_arg = (cos(gamma)*cos(theta1)*sin(theta2)*cos(phi1-phi2) + \
sin(gamma)*sin(theta2)*sin(phi2-phi1) - \
cos(gamma)*sin(theta1)*cos(theta2))
if dummy_arg >= 0:
return gamma
else:
return pi + gamma
|
def gamma(phi1,phi2,theta1,theta2)
|
calculate third rotation angle
inputs are angles from 2 pulsars
returns the angle.
| 2.424573
| 2.37787
| 1.019641
|
rotated_gamma = 0
for ii in range(2*l+1):
rotated_gamma += Dlmk(l,m,ii-l,phi1,phi2,theta1,theta2).conjugate()*gamma_ml[ii]
return rotated_gamma
|
def rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml)
|
This function takes any gamma in the computational frame and rotates it to the
cosmic frame.
| 4.201442
| 4.734344
| 0.887439
|
if m>0:
ans=(1./sqrt(2))*(rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml) + \
(-1)**m*rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml))
return ans.real
if m==0:
return rotated_Gamma_ml(0,l,phi1,phi2,theta1,theta2,gamma_ml).real
if m<0:
ans=(1./sqrt(2)/complex(0.,1))*(rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml) - \
(-1)**m*rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml))
return ans.real
|
def real_rotated_Gammas(m,l,phi1,phi2,theta1,theta2,gamma_ml)
|
This function returns the real-valued form of the Overlap Reduction Functions,
see Eq. 47 in Mingarelli et al., 2013.
| 1.988835
| 2.039323
| 0.975243
|
if formbats:
psr.formbats()
res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
res -= numpy.sum(res/err**2) / numpy.sum(1/err**2)
return numpy.sum(res * res / (1e-12 * err * err))
|
def chisq(psr,formbats=False)
|
Return the total chisq for the current timing solution,
removing noise-averaged mean residual, and ignoring deleted points.
| 7.656826
| 6.876737
| 1.113439
|
if formbats:
psr.formbats()
res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
res -= numpy.sum(res/err**2) / numpy.sum(1/err**2)
# bats already updated by residuals(); skip constant-phase column
M = psr.designmatrix(updatebats=False,fixunits=True,fixsigns=True)[psr.deleted==0,1:]
# renormalize design-matrix columns
if renormalize:
norm = numpy.sqrt(numpy.sum(M**2,axis=0))
M /= norm
else:
norm = 1.0
# compute chisq derivative, de-renormalize
dr = -2 * numpy.dot(M.T,res / (1e-12 * err**2)) * norm
return dr
|
def dchisq(psr,formbats=False,renormalize=True)
|
Return gradient of total chisq for the current timing solution,
after removing noise-averaged mean residual, and ignoring deleted points.
| 7.189427
| 6.918782
| 1.039117
|
ctr, err = psr.vals(), psr.errs()
# to avoid losing precision, we're searching in units of parameter errors
if numpy.any(err == 0.0):
print("Warning: one or more fit parameters have zero a priori error, and won't be searched.")
hloc, hval = [], []
def func(xs):
psr.vals([c + x*e for x,c,e in zip(xs,ctr,err)])
ret = chisq(psr,formbats=formbats)
if numpy.isnan(ret):
print("Warning: chisq is nan at {0}.".format(psr.vals()))
if history:
hloc.append(psr.vals())
hval.append(ret)
return ret
def dfunc(xs):
psr.vals([c + x*e for x,c,e in zip(xs,ctr,err)])
dc = dchisq(psr,formbats=formbats,renormalize=renormalize)
ret = numpy.array([d*e for d,e in zip(dc,err)],'d')
return ret
opts = kwargs.copy()
if method not in ['Nelder-Mead','Powell']:
opts['jac'] = dfunc
if method in ['L-BFGS-B']:
opts['bounds'] = [(float((bounds[par][0] - ctr[i])/err[i]),
float((bounds[par][1] - ctr[i])/err[i])) if par in bounds else (None,None)
for i,par in enumerate(psr.pars())]
res = scipy.optimize.minimize(func,[0.0]*len(ctr),method=method,**opts)
if hasattr(res,'message'):
print(res.message)
# this will also set parameters to the minloc
minchisq = func(res.x)
if history:
return minchisq, numpy.array(hval), numpy.array(hloc)
else:
return minchisq
|
def findmin(psr,method='Nelder-Mead',history=False,formbats=False,renormalize=True,bounds={},**kwargs)
|
Use scipy.optimize.minimize to find minimum-chisq timing solution,
passing through all extra options. Resets psr[...].val to the final solution,
and returns the final chisq. Will use chisq gradient if method requires it.
Ignores deleted points.
| 3.326632
| 3.301216
| 1.007699
|
mask = psr.deleted == 0
res, err = psr.residuals(removemean=False)[mask], psr.toaerrs[mask]
M = psr.designmatrix(updatebats=False,fixunits=True,fixsigns=True)[mask,:]
C = numpy.diag((err * 1e-6)**2)
if renormalize:
norm = numpy.sqrt(numpy.sum(M**2,axis=0))
M /= norm
else:
norm = numpy.ones_like(M[0,:])
mtcm = numpy.dot(M.T,numpy.dot(numpy.linalg.inv(C),M))
mtcy = numpy.dot(M.T,numpy.dot(numpy.linalg.inv(C),res))
xvar = numpy.linalg.inv(mtcm)
c = scipy.linalg.cho_factor(mtcm)
xhat = scipy.linalg.cho_solve(c,mtcy)
sol = psr.vals()
psr.vals(sol + xhat[1:] / norm[1:])
psr.errs(numpy.sqrt(numpy.diag(xvar)[1:]) / norm[1:])
return chisq(psr)
|
def glsfit(psr,renormalize=True)
|
Solve local GLS problem using scipy.linalg.cholesky.
Update psr[...].val and psr[...].err from solution.
If renormalize=True, normalize each design-matrix column by its norm.
| 4.627914
| 4.402709
| 1.051151
|
N = len(t)
F = np.zeros((N, 2 * nmodes))
if Tspan is not None:
T = Tspan
else:
T = t.max() - t.min()
# define sampling frequencies
if fmin is not None and fmax is not None:
f = np.linspace(fmin, fmax, nmodes)
else:
f = np.linspace(1 / T, nmodes / T, nmodes)
if logf:
f = np.logspace(np.log10(1 / T), np.log10(nmodes / T), nmodes)
Ffreqs = np.zeros(2 * nmodes)
Ffreqs[0::2] = f
Ffreqs[1::2] = f
F[:,::2] = np.sin(2*np.pi*t[:,None]*f[None,:])
F[:,1::2] = np.cos(2*np.pi*t[:,None]*f[None,:])
if freq:
return F, Ffreqs
else:
return F
|
def create_fourier_design_matrix(t, nmodes, freq=False, Tspan=None,
logf=False, fmin=None, fmax=None)
|
Construct fourier design matrix from eq 11 of Lentati et al, 2013
:param t: vector of time series in seconds
:param nmodes: number of fourier coefficients to use
:param freq: option to output frequencies
:param Tspan: option to use some other Tspan
:param logf: use log frequency spacing
:param fmin: lower sampling frequency
:param fmax: upper sampling frequency
:return: F: fourier design matrix
:return: f: Sampling frequencies (if freq=True)
| 1.851114
| 1.781699
| 1.03896
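A shape check, assuming create_fourier_design_matrix and numpy are in scope: 30 modes over a ten-year span give an (N, 60) basis and 60 paired frequencies.
import numpy as np
t = np.linspace(0, 10 * 3.16e7, 500)  # ten years of TOAs, in seconds
F, f = create_fourier_design_matrix(t, 30, freq=True)
print(F.shape, f.shape)  # (500, 60) (60,)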
|
fyr = 1 / 3.16e7
return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma-3) * f**(-gamma)
|
def powerlaw(f, log10_A=-16, gamma=5)
|
Power-law PSD.
:param f: Sampling frequencies
:param log10_A: log10 of red noise Amplitude [GW units]
:param gamma: Spectral index of red noise process
| 7.259807
| 7.483982
| 0.970046
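A quick numeric check, assuming powerlaw is in scope: at f = fyr the (f/fyr) dependence drops out, leaving A^2 fyr^-3 / (12 pi^2).
fyr = 1 / 3.16e7
print(powerlaw(fyr, log10_A=-15, gamma=13/3))  # equals (1e-15)**2 * fyr**-3 / (12 * pi**2)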
|
gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)
gwb.add_gwb(psr,dist)
return gwb
|
def add_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5,
gwAmp=1e-20, alpha=-0.66, logspacing=True)
|
Add a stochastic background from inspiraling binaries, using the tempo2
code that underlies the GWbkgrd plugin.
Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries,
'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator,
'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha'
determine its amplitude and exponent, and setting 'logspacing' to False
will use linear spacing for the individual sources.
It is also possible to create a background object with
gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)
then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a
consistent background for multiple pulsars.
Returns the GWB object
| 2.906843
| 2.268971
| 1.281128
|
gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing,
dipoleamps, dipoledir, dipolemag)
gwb.add_gwb(psr,dist)
return gwb
|
def add_dipole_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8,
fhigh=1e-5, gwAmp=1e-20, alpha=-0.66,
logspacing=True, dipoleamps=None,
dipoledir=None, dipolemag=None)
|
Add a stochastic background from inspiraling binaries distributed
according to a pure dipole distribution, using the tempo2
code that underlies the GWdipolebkgrd plugin.
The basic use is identical to that of 'add_gwb':
Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries,
'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator,
'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha'
determine its amplitude and exponent, and setting 'logspacing' to False
will use linear spacing for the individual sources.
Additionally, the dipole component can be specified by using one of two
methods:
1) Specify the dipole direction as three dipole amplitudes, in the vector
dipoleamps
2) Specify the direction of the dipole as a magnitude dipolemag, and a vector
dipoledir=[dipolephi, dipoletheta]
It is also possible to create a background object with
gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)
then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a
consistent background for multiple pulsars.
Returns the GWB object
| 2.66955
| 2.314824
| 1.153241
|
import tempfile
outfile = tempfile.NamedTemporaryFile(delete=False)
outfile.write(b'FORMAT 1\n')
outfile.write(b'MODE 1\n')
obsname = 'fake_' + os.path.basename(parfile)
if obsname[-4:] == '.par':
obsname = obsname[:-4]
for i,t in enumerate(obstimes):
outfile.write('{0} {1} {2} {3} {4} {5}\n'.format(
obsname,_geti(freq,i),t,_geti(toaerr,i),_geti(observatory,i),_geti(flags,i)
).encode('ascii'))
timfile = outfile.name
outfile.close()
pulsar = libstempo.tempopulsar(parfile,timfile,dofit=False)
for i in range(iters):
pulsar.stoas[:] -= pulsar.residuals() / 86400.0
pulsar.formbats()
os.remove(timfile)
return pulsar
|
def fakepulsar(parfile, obstimes, toaerr, freq=1440.0, observatory='AXIS',
flags='', iters=3)
|
Returns a libstempo tempopulsar object corresponding to a noiseless set
of observations for the pulsar specified in 'parfile', with observations
happening at times (MJD) given in the array (or list) 'obstimes', with
measurement errors given by toaerr (us).
A new timfile can then be saved with pulsar.savetim(). Re the other parameters:
- 'toaerr' needs to be either a common error, or a list of errors
of the same length of 'obstimes';
- 'freq' can be either a common observation frequency in MHz, or a list;
it defaults to 1440;
- 'observatory' can be either a common observatory name, or a list;
it defaults to the IPTA MDC 'AXIS';
- 'flags' can be a string (such as '-sys EFF.EBPP.1360') or a list of strings;
it defaults to an empty string;
- 'iters' is the number of iterative removals of computed residuals from TOAs
(which is how the fake pulsar is made...)
| 3.926388
| 3.407157
| 1.152394
|
if seed is not None:
N.random.seed(seed)
# default efacvec
efacvec = N.ones(psr.nobs)
# check that efac is scalar if flags is None
if flags is None:
if not N.isscalar(efac):
raise ValueError('ERROR: If flags is None, efac must be a scalar')
else:
efacvec = N.ones(psr.nobs) * efac
if flags is not None and flagid is not None and not N.isscalar(efac):
if len(efac) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == N.array(psr.flagvals(flagid))
efacvec[ind] = efac[ct]
psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
|
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None)
|
Add nominal TOA errors, multiplied by `efac` factor.
Optionally take a pseudorandom-number-generator seed.
| 3.664459
| 3.552543
| 1.031503
|
if seed is not None:
N.random.seed(seed)
# default equadvec
equadvec = N.zeros(psr.nobs)
# check that equad is scalar if flags is None
if flags is None:
if not N.isscalar(equad):
raise ValueError('ERROR: If flags is None, equad must be a scalar')
else:
equadvec = N.ones(psr.nobs) * equad
if flags is not None and flagid is not None and not N.isscalar(equad):
if len(equad) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == N.array(psr.flagvals(flagid))
equadvec[ind] = equad[ct]
psr.stoas[:] += (equadvec / day) * N.random.randn(psr.nobs)
|
def add_equad(psr, equad, flagid=None, flags=None, seed=None)
|
Add quadrature noise of rms `equad` [s].
Optionally take a pseudorandom-number-generator seed.
| 3.817026
| 3.771442
| 1.012087
|
if seed is not None:
N.random.seed(seed)
if flags is None:
t, U = quantize_fast(N.array(psr.toas(),'d'), dt=coarsegrain)
elif flags is not None and flagid is not None:
t, f, U = quantize_fast(N.array(psr.toas(),'d'),
N.array(psr.flagvals(flagid)),
dt=coarsegrain)
# default jitter value
ecorrvec = N.zeros(len(t))
# check that jitter is scalar if flags is None
if flags is None:
if not N.isscalar(ecorr):
raise ValueError('ERROR: If flags is None, jitter must be a scalar')
else:
ecorrvec = N.ones(len(t)) * ecorr
if flags is not None and flagid is not None and not N.isscalar(ecorr):
if len(ecorr) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == N.array(f)
ecorrvec[ind] = ecorr[ct]
psr.stoas[:] += (1 / day) * N.dot(U*ecorrvec, N.random.randn(U.shape[1]))
|
def add_jitter(psr, ecorr, flagid=None, flags=None, coarsegrain=0.1,
seed=None)
|
Add correlated quadrature noise of rms `ecorr` [s],
with coarse-graining time `coarsegrain` [days].
Optionally take a pseudorandom-number-generator seed.
| 3.541099
| 3.515858
| 1.007179
|
if seed is not None:
N.random.seed(seed)
t = psr.toas()
minx, maxx = N.min(t), N.max(t)
x = (t - minx) / (maxx - minx)
T = (day/year) * (maxx - minx)
size = 2*components
F = N.zeros((psr.nobs,size),'d')
f = N.zeros(size,'d')
for i in range(components):
F[:,2*i] = N.cos(2*math.pi*(i+1)*x)
F[:,2*i+1] = N.sin(2*math.pi*(i+1)*x)
f[2*i] = f[2*i+1] = (i+1) / T
norm = A**2 * year**2 / (12 * math.pi**2 * T)
prior = norm * f**(-gamma)
y = N.sqrt(prior) * N.random.randn(size)
psr.stoas[:] += (1.0/day) * N.dot(F,y)
|
def add_rednoise(psr,A,gamma,components=10,seed=None)
|
Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,
using `components` Fourier bases.
Optionally take a pseudorandom-number-generator seed.
| 3.527917
| 3.386359
| 1.041803
|
t = psr.toas()
t0 = offset * (N.max(t) - N.min(t))
sine = A * N.cos(2 * math.pi * f * day * (t - t0))
psr.stoas[:] += sine / day
|
def add_line(psr,f,A,offset=0.5)
|
Add a line of frequency `f` [Hz] and amplitude `A` [s],
with origin at a fraction `offset` through the dataset.
| 8.837185
| 8.100924
| 1.090886
|
# Define the heaviside function
heaviside = lambda x: 0.5 * (N.sign(x) + 1)
# Glitches are spontaneous spin-up events.
# Thus TOAs will be advanced, and residuals will be negative.
psr.stoas[:] -= amp * heaviside(psr.toas() - epoch) * \
(psr.toas() - epoch)*86400.0
|
def add_glitch(psr, epoch, amp)
|
Like pulsar term BWM event, but now differently parameterized: just an
amplitude (not log-amp) parameter, and an epoch. [source: piccard]
:param psr: pulsar object
:param epoch: TOA time (MJD) the burst hits the earth
:param amp: amplitude of the glitch
| 10.579196
| 10.33579
| 1.02355
|
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0] # +(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]:
return ys[-1] # +(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
return interpolator(x)
def ufunclike(xs):
# list() materializes the map iterator so this also works under Python 3
return N.array(list(map(pointwise, N.array(xs))))
return ufunclike
|
def extrap1d(interpolator)
|
Function to extend an interpolation function to an
extrapolation function.
:param interpolator: scipy interp1d object
:returns ufunclike: extension of function to extrapolation
| 2.065562
| 2.085175
| 0.990594
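A small usage example, assuming extrap1d (with the Python 3 fix above) and scipy are in scope: outside the fit range the boundary values are held constant.
import numpy as N
from scipy.interpolate import interp1d
f = extrap1d(interp1d([0.0, 1.0, 2.0], [0.0, 1.0, 4.0]))
print(f([-1.0, 0.5, 3.0]))  # [0.  0.5 4. ] -- flat extrapolation beyond [0, 2]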
|
# begin loop over all pulsar pairs and calculate ORF
npsr = len(psr)
ORF = N.zeros((npsr, npsr))
phati = N.zeros(3)
phatj = N.zeros(3)
ptheta = [N.pi/2 - p['DECJ'].val for p in psr]
pphi = [p['RAJ'].val for p in psr]
for ll in range(0, npsr):
phati[0] = N.cos(pphi[ll]) * N.sin(ptheta[ll])
phati[1] = N.sin(pphi[ll]) * N.sin(ptheta[ll])
phati[2] = N.cos(ptheta[ll])
for kk in range(0, npsr):
phatj[0] = N.cos(pphi[kk]) * N.sin(ptheta[kk])
phatj[1] = N.sin(pphi[kk]) * N.sin(ptheta[kk])
phatj[2] = N.cos(ptheta[kk])
if ll != kk:
xip = (1.-N.sum(phati*phatj)) / 2.
ORF[ll, kk] = 3.*( 1./3. + xip * ( N.log(xip) -1./6.) )
else:
ORF[ll, kk] = 2.0
return ORF
|
def computeORFMatrix(psr)
|
Compute ORF matrix.
:param psr: List of pulsar object instances
:returns: Matrix that has the ORF values for every pulsar
pair with 2 on the diagonals to account for the
pulsar term.
| 2.592989
| 2.521901
| 1.028188
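The off-diagonal entries trace the Hellings-Downs curve; a self-contained sketch of that formula for one pulsar pair separated by angle zeta (note N.sum(phati*phatj) is just cos(zeta)):
import numpy as N
zeta = N.pi / 2
xip = (1. - N.cos(zeta)) / 2.
print(3. * (1. / 3. + xip * (N.log(xip) - 1. / 6.)))  # ORF value for this pair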
|
res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
if (not deleted) and N.any(psr.deleted != 0):
res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))
meanres = math.sqrt(N.mean(res**2)) / 1e-6
if group is None:
i = N.argsort(t)
P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
else:
if (not deleted) and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = (flagmask == flagval)
flagres, flagt, flagerrs = res[f], t[f], errs[f]
i = N.argsort(flagt)
P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))
P.xlabel('MJD'); P.ylabel('res [us]')
P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
|
def plotres(psr,deleted=False,group=None,**kwargs)
|
Plot residuals, compute unweighted rms residual.
| 2.988965
| 2.922713
| 1.022668
|
theta, phi, omega, polarization = gwb.gw_dist()
rho = phi-N.pi
eta = 0.5*N.pi - theta
# I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
# /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
# RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
#old_settings = N.seterr(invalid='ignore')
P.title("GWB source population")
ax = P.axes(projection='mollweide')
foo = P.scatter(rho, eta, marker='.', s=1)
#bar = N.seterr(**old_settings)
return foo
|
def plotgwsrc(gwb)
|
Plot a GWB source population as a mollweide projection.
| 8.166979
| 7.338881
| 1.112837
|
mask = Mask(pulsar,usedeleted)
err = 1.0e-6 * mask(pulsar.toaerrs)
Cdiag = (efac*err)**2
if equad:
Cdiag = Cdiag + (1e-6*equad)**2 * N.ones(len(err))
if Ared:
redf, F = _setuprednoise(pulsar,redcomponents)
F = mask(F)
phi = Ared**2 * redf**(-gammared)
if jitter:
# quantize at 1 second; U plays the role of redF
t, U = _quantize(86400.0 * mask(pulsar.toas()),1.0)
phi_j = (1e-6*jitter)**2 * N.ones(U.shape[1])
# stack the basis arrays if we're also doing red noise
phi = N.hstack((phi,phi_j)) if Ared else phi_j
F = N.hstack((F,U)) if Ared else U
if Ared or jitter:
# Lentati formulation for correlated noise
invphi = N.diag(1/phi)
Ninv = N.diag(1/Cdiag)
NinvF = dot(Ninv,F) # could be accelerated
X = invphi + dot(F.T,NinvF) # invphi + FTNinvF
Cinv = Ninv - dot(NinvF,N.linalg.inv(X),NinvF.T)
logCdet = N.sum(N.log(Cdiag)) + N.sum(N.log(phi)) + N.linalg.slogdet(X)[1] # check
else:
# noise is all diagonal
Cinv = N.diag(1/Cdiag)
logCdet = N.sum(N.log(Cdiag))
if marginalize:
M = mask(pulsar.designmatrix())
res = mask(N.array(pulsar.residuals(updatebats=False),'d'))
CinvM = N.dot(Cinv,M)
A = dot(M.T,CinvM)
invA = N.linalg.inv(A)
CinvMres = dot(res,CinvM)
ret = -0.5 * dot(res,Cinv,res) + 0.5 * dot(CinvMres,invA,CinvMres.T)
if normalize:
ret = ret - 0.5 * logCdet - 0.5 * N.linalg.slogdet(A)[1] - 0.5 * (M.shape[0] - M.shape[1]) * math.log(2.0*math.pi)
else:
res = mask(N.array(pulsar.residuals(),'d'))
ret = -0.5 * dot(res,Cinv,res)
if normalize:
ret = ret - 0.5 * logCdet - 0.5 * len(res) * math.log(2.0*math.pi)
return ret
|
def loglike(pulsar,efac=1.0,equad=None,jitter=None,Ared=None,gammared=None,marginalize=True,normalize=True,redcomponents=10,usedeleted=True)
|
Returns the Gaussian-process likelihood for 'pulsar'.
The likelihood is evaluated at the current value of the pulsar parameters,
as given by pulsar[parname].val.
If efac, equad, and/or Ared are set, will compute the likelihood assuming
the corresponding noise model. EFAC multiplies measurement noise;
EQUAD adds in quadrature, and is given in us; red-noise is specified with
the GW-like dimensionless amplitude Ared and exponent gamma, and is
modeled with 'redcomponents' Fourier components.
If marginalize=True (the default), loglike will marginalize over all the
parameters in pulsar.fitpars, using an M-matrix formulation.
| 4.142987
| 4.179834
| 0.991185
|
ret = []
for par in parlist:
# match anything of the form XXX{number1-number2}
m = re.match(r'(.*)\{([0-9]+)-([0-9]+)\}', par)
if m is None:
ret.append(par)
else:
# (these are strings)
root, number1, number2 = m.group(1), m.group(2), m.group(3)
# if number1 begins with 0s, number parameters as 00, 01, 02, ...,
# otherwise go with 0, 1, 2, ...
fmt = '{{0}}{{1:0{0}d}}'.format(len(number1)) if number1[0] == '0' else '{0}{1:d}'
ret = ret + [fmt.format(root,i) for i in range(int(m.group(2)),int(m.group(3))+1)]
return ret
|
def expandranges(parlist)
|
Rewrite a list of parameters by expanding ranges (e.g., log10_efac{1-10}) into
individual parameters.
| 3.585795
| 3.445779
| 1.040634
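A short example, assuming expandranges is in scope: ranges expand in place, and a leading zero in the first index sets the zero-padding width.
print(expandranges(['JUMP1', 'log10_efac{1-3}']))
# ['JUMP1', 'log10_efac1', 'log10_efac2', 'log10_efac3']
print(expandranges(['DMX_{0001-0003}']))
# ['DMX_0001', 'DMX_0002', 'DMX_0003']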
|
rootdict = {root: [] for root in roots}
res = []
for par in parlist:
found = False
for root in roots:
if len(par) > len(root) and par[:len(root)] == root:
rootdict[root].append(int(par[len(root):]))
found = True
if not found:
res.append(par)
for root in roots:
if rootdict[root]:
if len(rootdict[root]) > 1:
rmin, rmax = min(rootdict[root]), max(rootdict[root])
res.append('{0}{{{1}-{2}}}{3}'.format(root,rmin,rmax,
'(incomplete)' if rmax - rmin != len(rootdict[root]) - 1 else ''))
else:
res.append('{0}{1}'.format(root,rootdict[root][0]))
return res
|
def _findrange(parlist,roots=['JUMP','DMXR1_','DMXR2_','DMX_','efac','log10_efac'])
|
Rewrite a list of parameters name by detecting ranges (e.g., JUMP1, JUMP2, ...)
and compressing them.
| 2.122473
| 2.120648
| 1.000861
|
w,s,d = data.chains.shape
start = int((1.0 - fraction) * s)
total = int((s - start) / skip)
return data.chains[:,start::skip,:].reshape((w*total,d))
|
def merge(data,skip=50,fraction=1.0)
|
Merge one in every 'skip' clouds into a single emcee population,
using the final 'fraction' of the run.
| 6.791086
| 6.872403
| 0.988168
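A shape check, assuming merge and numpy are in scope; the stand-in Run class mimics an object with an emcee-style chains array of shape (walkers, steps, dim):
import numpy as np
class Run: pass
data = Run()
data.chains = np.random.randn(10, 1000, 3)  # 10 walkers, 1000 steps, 3 parameters
print(merge(data, skip=50, fraction=0.5).shape)  # (100, 3): 10 walkers * 10 kept clouds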
|
ret = data
if min is not None:
ret = ret[ret[:,index] > min,:]
if max is not None:
ret = ret[ret[:,index] < max,:]
return ret
|
def cull(data,index,min=None,max=None)
|
Sieve an emcee cloud by excluding walkers with search variable 'index'
smaller than 'min' or larger than 'max'.
| 4.050076
| 3.180219
| 1.273521
|
pth = resource_filename(Requirement.parse('libstempo'),
'libstempo/ecc_vs_nharm.txt')
fil = np.loadtxt(pth)
return interp1d(fil[:,0], fil[:,1])
|
def make_ecc_interpolant()
|
Make interpolation function from eccentricity file to
determine number of harmonics to use for a given
eccentricity.
:returns: interpolant
| 7.911838
| 8.513092
| 0.929373
|
# chirp mass
mc *= SOLAR2S
dedt = -304/(15*mc) * (2*np.pi*mc*F)**(8/3) * e * \
(1 + 121/304*e**2) / ((1-e**2)**(5/2))
return dedt
|
def get_edot(F, mc, e)
|
Compute eccentricity derivative from Taylor et al. (2015)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param e: Eccentricity of binary
:returns: de/dt
| 8.911454
| 7.547956
| 1.180645
|
# chirp mass
mc *= SOLAR2S
dFdt = 48 / (5*np.pi*mc**2) * (2*np.pi*mc*F)**(11/3) * \
(1 + 73/24*e**2 + 37/96*e**4) / ((1-e**2)**(7/2))
return dFdt
|
def get_Fdot(F, mc, e)
|
Compute frequency derivative from Taylor et al. (2015)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param e: Eccentricity of binary
:returns: dF/dt
| 6.964586
| 6.03554
| 1.153929
|
# chirp mass
mc *= SOLAR2S
#total mass
m = (((1+q)**2)/q)**(3/5) * mc
dgdt = 6*np.pi*F * (2*np.pi*F*m)**(2/3) / (1-e**2) * \
(1 + 0.25*(2*np.pi*F*m)**(2/3)/(1-e**2)*(26-15*e**2))
return dgdt
|
def get_gammadot(F, mc, q, e)
|
Compute gamma dot from Barack and Cutler (2004)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param e: Eccentricity of binary
:returns: dgamma/dt
| 6.516987
| 5.963099
| 1.092886
|
F = y[0]
e = y[1]
gamma = y[2]
phase = y[3]
#total mass
m = (((1+q)**2)/q)**(3/5) * mc
dFdt = get_Fdot(F, mc, e)
dedt = get_edot(F, mc, e)
dgdt = get_gammadot(F, mc, q, e)
dphasedt = 2*np.pi*F
return np.array([dFdt, dedt, dgdt, dphasedt])
|
def get_coupled_ecc_eqns(y, t, mc, q)
|
Computes the coupled system of differential
equations from Peters (1964) and Barack &
Cutler (2004). This is a system of four variables:
F: Orbital frequency [Hz]
e: Orbital eccentricity
gamma: Angle of precession of periastron [rad]
phase0: Orbital phase [rad]
:param y: Vector of input parameters [F, e, gamma, phase]
:param t: Time [s]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:returns: array of derivatives [dF/dt, de/dt, dgamma/dt, dphase/dt]
| 3.937808
| 3.522922
| 1.117767
|
y0 = np.array([F0, e0, gamma0, phase0])
y, infodict = odeint(get_coupled_ecc_eqns, y0, t, args=(mc,q), full_output=True)
if infodict['message'] == 'Integration successful.':
ret = y
else:
ret = 0
return ret
|
def solve_coupled_ecc_solution(F0, e0, gamma0, phase0, mc, q, t)
|
Compute the solution to the coupled system of equations
from Peters (1964) and Barack & Cutler (2004) at
a given time.
:param F0: Initial orbital frequency [Hz]
:param e0: Initial orbital eccentricity
:param gamma0: Initial angle of precession of periastron [rad]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param t: Time at which to evaluate solution [s]
:returns: (F(t), e(t), gamma(t), phase(t))
| 2.944435
| 3.174592
| 0.9275
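A hedged usage sketch, assuming solve_coupled_ecc_solution, its helper functions, and the SOLAR2S constant are all in scope: evolve a mildly eccentric binary over ten years.
import numpy as np
t = np.linspace(0, 10 * 3.16e7, 100)  # ten years, in seconds
traj = solve_coupled_ecc_solution(F0=1e-8, e0=0.5, gamma0=0.0, phase0=0.0, mc=1e9, q=1.0, t=t)
print(traj.shape)  # (100, 4) on success (columns F, e, gamma, phase); the function returns 0 otherwise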
|
# convert to seconds
mc *= SOLAR2S
dl *= MPC2S
omega = 2 * np.pi * F
amp = n * mc**(5/3) * omega**(2/3) / dl
ret = -amp * (ss.jn(n-2,n*e) - 2*e*ss.jn(n-1,n*e) +
(2/n)*ss.jn(n,n*e) + 2*e*ss.jn(n+1,n*e) -
ss.jn(n+2,n*e))
return ret
|
def get_an(n, mc, dl, F, e)
|
Compute a_n from Eq. 22 of Taylor et al. (2015).
:param n: Harmonic number
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:returns: a_n
| 4.529318
| 4.167187
| 1.0869
|
# convert to seconds
mc *= SOLAR2S
dl *= MPC2S
omega = 2 * np.pi * F
amp = n * mc**(5/3) * omega**(2/3) / dl
ret = -amp * np.sqrt(1-e**2) *(ss.jn(n-2,n*e) - 2*ss.jn(n,n*e) +
ss.jn(n+2,n*e))
return ret
|
def get_bn(n, mc, dl, F, e)
|
Compute b_n from Eq. 22 of Taylor et al. (2015).
:param n: Harmonic number
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:returns: b_n
| 7.819019
| 6.88624
| 1.135455
|
# convert to seconds
mc *= SOLAR2S
dl *= MPC2S
omega = 2 * np.pi * F
amp = 2 * mc**(5/3) * omega**(2/3) / dl
ret = amp * ss.jn(n,n*e) / (n * omega)
return ret
|
def get_cn(n, mc, dl, F, e)
|
Compute c_n from Eq. 22 of Taylor et al. (2015).
:param n: Harmonic number
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:returns: c_n
| 9.759679
| 8.879027
| 1.099183
|
n = np.arange(1, nmax)
# time dependent amplitudes
an = get_an(n, mc, dl, F, e)
bn = get_bn(n, mc, dl, F, e)
cn = get_cn(n, mc, dl, F, e)
# time dependent terms
omega = 2*np.pi*F
gt = gamma + gammadot * t
lt = l0 + omega * t
# tiled phase
phase1 = n * np.tile(lt, (nmax-1,1)).T
phase2 = np.tile(gt, (nmax-1,1)).T
phasep = phase1 + 2*phase2
phasem = phase1 - 2*phase2
# intermediate terms
sp = np.sin(phasem)/(n*omega-2*gammadot) + \
np.sin(phasep)/(n*omega+2*gammadot)
sm = np.sin(phasem)/(n*omega-2*gammadot) - \
np.sin(phasep)/(n*omega+2*gammadot)
cp = np.cos(phasem)/(n*omega-2*gammadot) + \
np.cos(phasep)/(n*omega+2*gammadot)
cm = np.cos(phasem)/(n*omega-2*gammadot) - \
np.cos(phasep)/(n*omega+2*gammadot)
splus_n = -0.5 * (1+np.cos(inc)**2) * (an*sp - bn*sm) + \
(1-np.cos(inc)**2)*cn * np.sin(phase1)
scross_n = np.cos(inc) * (an*cm - bn*cp)
return np.sum(splus_n, axis=1), np.sum(scross_n, axis=1)
|
def calculate_splus_scross(nmax, mc, dl, F, e, t, l0, gamma, gammadot, inc)
|
Calculate splus and scross summed over all harmonics.
This waveform differs slightly from that in Taylor et al (2015)
in that it includes the time dependence of the advance of periastron.
:param nmax: Total number of harmonics to use
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:param t: TOAs [s]
:param l0: Initial eccentric anomaly [rad]
:param gamma: Angle of periastron advance [rad]
:param gammadot: Time derivative of angle of periastron advance [rad/s]
:param inc: Inclination angle [rad]
| 2.436737
| 2.390002
| 1.019554
|
y = dt[response]
seq = dt[sequence]
if trim_seq_len is not None:
seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
seq = [s.replace("N", "") for s in seq]
dt_kmer = kmer_count(seq, k)
Xsp = csc_matrix(dt_kmer)
en = ElasticNet(alpha=1, standardize=False, n_splits=3)
en.fit(Xsp, y)
# which coefficients are nonzero?
nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist()
# perform stepwise selection
#
# TODO - how do we deal with the intercept?
# largest number of motifs where they don't differ by more than 1 k-mer
def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):
F, pval = f_regression(dt_kmer[to_be_selected_kmers], y)
kmer = to_be_selected_kmers.pop(pval.argmin())
selected_kmers.append(kmer)
def select_criterion(s1, s2, consider_shift=True):
if hamming_distance(s1, s2) <= 1:
return False
if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
return False
if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
return False
return True
to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers
if select_criterion(ckmer, kmer, consider_shift)]
if len(to_be_selected_kmers) == 0:
return selected_kmers
else:
# regress out the new feature
lm = LinearRegression()
lm.fit(dt_kmer[selected_kmers], y)
y_new = y - lm.predict(dt_kmer[selected_kmers])
return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift)
selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)
return selected_kmers
|
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1,
seq_align="start", trim_seq_len=None)
|
Find best k-mers for CONCISE initialization.
Args:
dt (pd.DataFrame): Table containing response variable and sequence.
response (str): Name of the column used as the response variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
k (int): Desired k-mer length.
n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
consider_shift (boolean): When performing stepwise k-mer selection, should shifted k-mers count as similar (e.g. is TATTTA similar to ATTTAG)?
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
Returns:
string list: Best set of motifs for this dataset sorted with respect to
confidence (best candidate occurring first).
Details:
First a lasso model gets fitted to get a set of initial motifs. Next, the best
subset of unrelated motifs is selected by stepwise selection.
| 3.016271
| 2.919362
| 1.033195
|
# generate all k-mers
all_kmers = generate_all_kmers(k)
kmer_count_list = []
for seq in seq_list:
kmer_count_list.append([seq.count(kmer) for kmer in all_kmers])
return pd.DataFrame(kmer_count_list, columns=all_kmers)
|
def kmer_count(seq_list, k)
|
Generate k-mer counts from a set of sequences
Args:
seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})
k (int): K in k-mer.
Returns:
pandas.DataFrame: Count matrix for each sequence in seq_list
Example:
>>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
AA AC AG AT CA CC CG CT GA GC GG GT TA TC TG TT
0 0 1 0 1 0 0 1 0 0 0 0 1 1 0 0 1
1 0 1 0 0 0 0 2 0 2 1 0 0 0 0 0 0
| 2.04846
| 2.641321
| 0.775544
|
bases = ['A', 'C', 'G', 'T']
return [''.join(p) for p in itertools.product(bases, repeat=k)]
|
def generate_all_kmers(k)
|
Generate all possible k-mers
Example:
>>> generate_all_kmers(2)
['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
| 2.686441
| 3.240941
| 0.828908
|
return {key: np.asarray(value) if value is not None else None for key, value in obj_dict.items()}
|
def dict_to_numpy_dict(obj_dict)
|
Convert a dictionary of lists into a dictionary of numpy arrays
| 2.989968
| 2.666544
| 1.121289
|
if type(obj_dict) == dict:
return {key: rec_dict_to_numpy_dict(value) if value is not None else None for key, value in obj_dict.items()}
elif obj_dict is None:
return None
else:
return np.asarray(obj_dict)
|
def rec_dict_to_numpy_dict(obj_dict)
|
Same as dict_to_numpy_dict, but recursive
| 1.965529
| 1.946673
| 1.009686
|
if type(a) != type(b) and type(a) != np.ndarray and type(b) != np.ndarray:
return False
# go through a dictionary
if type(a) == dict and type(b) == dict:
if not a.keys() == b.keys():
return False
for key in a.keys():
res = compare_numpy_dict(a[key], b[key], exact)
if res == False:
print("false for key = ", key)
return False
return True
# if type(a) == np.ndarray and type(b) == np.ndarray:
if type(a) == np.ndarray or type(b) == np.ndarray:
if exact:
return (a == b).all()
else:
# assert_almost_equal raises on mismatch; return True explicitly on success
np.testing.assert_almost_equal(a, b)
return True
if a is None and b is None:
return True
raise NotImplementedError
|
def compare_numpy_dict(a, b, exact=True)
|
Compare two recursive numpy dictionaries
| 2.047643
| 1.995835
| 1.025958
|
# make sure n_bases is an int
assert type(n_bases) == int
x = np.arange(start, end + 1)
knots = get_knots(start, end, n_bases, spline_order)
X_splines = get_X_spline(x, knots, n_bases, spline_order, add_intercept)
S = get_S(n_bases, spline_order, add_intercept)
# Get the same knot positions as with mgcv
# https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
return X_splines, S, knots
|
def get_gam_splines(start=0, end=100, n_bases=10, spline_order=3, add_intercept=True)
|
Main function required by (TF)Concise class
| 3.652989
| 3.666203
| 0.996396
|
x_range = end - start
start = start - x_range * 0.001
end = end + x_range * 0.001
# mgcv annotation
m = spline_order - 1
nk = n_bases - m # number of interior knots
dknots = (end - start) / (nk - 1)
knots = np.linspace(start=start - dknots * (m + 1),
stop=end + dknots * (m + 1),
num=nk + 2 * m + 2)
return knots.astype(np.float32)
|
def get_knots(start, end, n_bases=10, spline_order=3)
|
Return the B-spline knot positions for the [start, end] range,
following the mgcv knot placement.
| 3.513395
| 3.453295
| 1.017404
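A quick check, assuming get_knots and numpy are in scope: with cubic splines (m = spline_order - 1 = 2) and 10 bases there are nk + 2m + 2 = 14 knots.
print(len(get_knots(0, 100, n_bases=10, spline_order=3)))  # 14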
|
if len(x.shape) != 1:
raise ValueError("x has to be 1 dimensional")
tck = [knots, np.zeros(n_bases), spline_order]
X = np.zeros([len(x), n_bases])
for i in range(n_bases):
vec = np.zeros(n_bases)
vec[i] = 1.0
tck[1] = vec
X[:, i] = si.splev(x, tck, der=0)
if add_intercept is True:
ones = np.ones_like(X[:, :1])
X = np.hstack([ones, X])
return X.astype(np.float32)
|
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True)
|
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
| 2.381912
| 2.394452
| 0.994763
|
S = self.S
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S
|
def getS(self, add_intercept=False)
|
Get the penalty matrix S
Returns
np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
| 3.629297
| 3.753212
| 0.966984
|
# sanity check
if x.min() < self.start:
raise Warning("x.min() < self.start")
if x.max() > self.end:
raise Warning("x.max() > self.end")
return get_X_spline(x=x,
knots=self.knots,
n_bases=self.n_bases,
spline_order=self.spline_order,
add_intercept=add_intercept)
|
def predict(self, x, add_intercept=False)
|
For some x, predict the bn(x) for each base
Arguments:
x: np.array; Vector of dimension 1
add_intercept: bool; should we add the intercept to the final array
Returns:
np.array, of shape (len(x), n_bases + (add_intercept))
| 3.068095
| 3.000234
| 1.022619
|
motifs = _load_motifs()
motif_names = sorted(list(motifs.keys()))
df = pd.Series(motif_names).str.split(expand=True)
df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True)
# compute the consensus
consensus = pd.Series([PWM(motifs[m]).get_consensus() for m in motif_names])
df["consensus"] = consensus
return df
|
def get_metadata()
|
Get pandas.DataFrame with metadata about the PWM's. Columns:
- PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm)
- info1 - additional information about the motifs
- info2
- consensus: PWM consensus sequence
| 3.782809
| 3.065827
| 1.233863
|
l = _load_motifs()
l = {k.split()[0]: v for k, v in l.items()}
pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list]
return pwm_list
|
def get_pwm_list(motif_name_list, pseudocountProb=0.0001)
|
Get a list of ENCODE PWM's.
# Arguments
motif_name_list: List of id's from the `PWM_id` column in `get_metadata()` table
pseudocountProb: Added pseudocount probabilities to the PWM
# Returns
List of `concise.utils.pwm.PWM` instances.
| 3.302644
| 3.790553
| 0.871283
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = y_true.round()
if len(y_true) == 0 or len(np.unique(y_true)) < 2:
return np.nan
return skm.roc_auc_score(y_true, y_pred)
|
def auc(y_true, y_pred, round=True)
|
Area under the ROC curve
| 2.380681
| 2.482792
| 0.958872
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
precision, recall, _ = skm.precision_recall_curve(y_true, y_pred)
return skm.auc(recall, precision)
|
def auprc(y_true, y_pred)
|
Area under the precision-recall curve
| 2.51999
| 2.4927
| 1.010948
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
# rename the curve arrays so they no longer shadow the `precision` threshold argument
prec, rec, _ = skm.precision_recall_curve(y_true, y_pred)
return rec[np.searchsorted(prec - precision, 0)]
|
def recall_at_precision(y_true, y_pred, precision)
|
Recall at a certain precision threshold
Args:
y_true: true labels
y_pred: predicted labels
precision: desired precision level at which to compute the recall
| 3.671485
| 4.812247
| 0.762946
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.accuracy_score(y_true, y_pred)
|
def accuracy(y_true, y_pred, round=True)
|
Classification accuracy
| 1.99
| 2.091003
| 0.951696
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.recall_score(y_true, y_pred)
|
def tpr(y_true, y_pred, round=True)
|
True positive rate `tp / (tp + fn)`
| 2.093853
| 2.177326
| 0.961663
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
c = skm.confusion_matrix(y_true, y_pred)
return c[0, 0] / c[0].sum()
|
def tnr(y_true, y_pred, round=True)
|
True negative rate `tn / (tn + fp)`
| 2.215483
| 2.283389
| 0.970261
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.matthews_corrcoef(y_true, y_pred)
|
def mcc(y_true, y_pred, round=True)
|
Matthews correlation coefficient
| 1.909991
| 2.014993
| 0.947889
|
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.f1_score(y_true, y_pred)
|
def f1(y_true, y_pred, round=True)
|
F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
| 1.992803
| 2.103372
| 0.947432
|
return np.mean(y_true.argmax(axis=1) == y_pred.argmax(axis=1))
|
def cat_acc(y_true, y_pred)
|
Categorical accuracy
| 2.320327
| 2.429452
| 0.955082
|
y_true, y_pred = _mask_nan(y_true, y_pred)
return np.corrcoef(y_true, y_pred)[0, 1]
|
def cor(y_true, y_pred)
|
Compute Pearson correlation coefficient.
| 2.450061
| 2.169463
| 1.12934
|
y_true, y_pred = _mask_nan(y_true, y_pred)
if len(y_true) > nb_sample:
idx = np.arange(len(y_true))
np.random.shuffle(idx)
idx = idx[:nb_sample]
y_true = y_true[idx]
y_pred = y_pred[idx]
return kendalltau(y_true, y_pred)[0]
|
def kendall(y_true, y_pred, nb_sample=100000)
|
Kendall's tau coefficient, Kendall rank correlation coefficient
| 1.648227
| 1.645519
| 1.001646
|
y_true, y_pred = _mask_nan(y_true, y_pred)
return np.mean(np.abs(y_true - y_pred))
|
def mad(y_true, y_pred)
|
Mean absolute deviation
| 2.259136
| 2.374276
| 0.951505
|
y_true, y_pred = _mask_nan(y_true, y_pred)
return ((y_true - y_pred) ** 2).mean(axis=None)
|
def mse(y_true, y_pred)
|
Mean squared error
| 2.494634
| 2.652279
| 0.940563
|
y_true, y_pred = _mask_nan(y_true, y_pred)
var_resid = np.var(y_true - y_pred)
var_y_true = np.var(y_true)
return 1 - var_resid / var_y_true
|
def var_explained(y_true, y_pred)
|
Fraction of variance explained.
| 2.466934
| 2.521581
| 0.978328
|
def sample_log(myrange):
x = np.random.uniform(np.log10(myrange[0]), np.log10(myrange[1]))
return 10**x
def sample_unif(myrange):
x = np.random.uniform(myrange[0], myrange[1])
return x
def sample_set(myset):
x = random.sample(myset, 1)
return x[0]
def type_dep_sample(myrange):
if type(myrange) is list:
return sample_log(myrange)
if type(myrange) is tuple:
return sample_unif(myrange)
if type(myrange) is set:
return sample_set(myrange)
return myrange
return {k: type_dep_sample(v) for k, v in params.items()}
|
def sample_params(params)
|
Randomly sample hyper-parameters stored in a dictionary on a predefined range and scale.
Useful for hyper-parameter random search.
Args:
params (dict): hyper-parameters to sample. Dictionary value-type parsing:
- :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3,1e7)`
- :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1,10)`
- :python:`{1, 2}` - sample from a **set** of values.
- :python:`1` - don't sample
Returns:
dict: Dictionary with the same keys as :py:attr:`params`, but with only one element as the value.
Examples:
>>> myparams = {
"max_pool": True, # allways use True
"step_size": [0.09, 0.005],
"step_decay": (0.9, 1),
"n_splines": {10, None}, # use either 10 or None
"some_tuple": {(1,2), (1)},
}
>>> concise.sample_params(myparams)
{'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)}
>>> concise.sample_params(myparams)
{'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)}
>>> concise.sample_params(myparams)
{'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)}
Note:
- :python:`{[1,2], [3,4]}` is invalid. Use :python:`{(1,2), (3,4)}` instead.
- You can always use :python:`{}` with a single element to bypass sampling.
| 2.152841
| 1.999972
| 1.076436
|
y = K.cast(K.round(y), K.floatx())
z = K.cast(K.round(z), K.floatx())
def count_matches(y, z):
return K.sum(K.cast(y, K.floatx()) * K.cast(z, K.floatx()))
ones = K.ones_like(y)
zeros = K.zeros_like(y)
y_ones = K.equal(y, ones)
y_zeros = K.equal(y, zeros)
z_ones = K.equal(z, ones)
z_zeros = K.equal(z, zeros)
tp = count_matches(y_ones, z_ones)
tn = count_matches(y_zeros, z_zeros)
fp = count_matches(y_zeros, z_ones)
fn = count_matches(y_ones, z_zeros)
return (tp, tn, fp, fn)
|
def contingency_table(y, z)
|
Note: y and z are rounded to 0 or 1 before the counts are computed
| 1.554733
| 1.527851
| 1.017594
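A tiny sanity check, assuming contingency_table above is in scope and a Keras backend K is available:
import numpy as np
from keras import backend as K
y = K.constant(np.array([1., 0., 1., 1.]))  # truth
z = K.constant(np.array([1., 1., 0., 1.]))  # prediction
tp, tn, fp, fn = contingency_table(y, z)
print(K.eval(tp), K.eval(tn), K.eval(fp), K.eval(fn))  # 2.0 0.0 1.0 1.0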
|
tp, tn, fp, fn = contingency_table(y, z)
return tp / (tp + fn)
|
def tpr(y, z)
|
True positive rate `tp / (tp + fn)`
| 3.006089
| 2.639784
| 1.138763
|
tp, tn, fp, fn = contingency_table(y, z)
return tn / (tn + fp)
|
def tnr(y, z)
|
True negative rate `tn / (tn + fp)`
| 3.131956
| 2.883315
| 1.086234
|
tp, tn, fp, fn = contingency_table(y, z)
return fp / (fp + tn)
|
def fpr(y, z)
|
False positive rate `fp / (fp + tn)`
| 3.293185
| 2.927511
| 1.124909
|
tp, tn, fp, fn = contingency_table(y, z)
return fn / (fn + tp)
|
def fnr(y, z)
|
False negative rate `fn / (fn + tp)`
| 4.237265
| 3.010819
| 1.407346
|
tp, tn, fp, fn = contingency_table(y, z)
return tp / (tp + fp)
|
def precision(y, z)
|
Precision `tp / (tp + fp)`
| 3.018203
| 2.952487
| 1.022258
|
tp, tn, fp, fn = contingency_table(y, z)
return fp / (tp + fp)
|
def fdr(y, z)
|
False discovery rate `fp / (tp + fp)`
| 4.14774
| 3.095987
| 1.339715
|
tp, tn, fp, fn = contingency_table(y, z)
return (tp + tn) / (tp + tn + fp + fn)
|
def accuracy(y, z)
|
Classification accuracy `(tp + tn) / (tp + tn + fp + fn)`
| 2.620792
| 2.199545
| 1.191516
|
_recall = recall(y, z)
_prec = precision(y, z)
return 2 * (_prec * _recall) / (_prec + _recall)
|
def f1(y, z)
|
F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
| 3.096139
| 2.342613
| 1.321661
|
tp, tn, fp, fn = contingency_table(y, z)
return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
|
def mcc(y, z)
|
Matthews correlation coefficient
| 2.29163
| 2.286322
| 1.002322
|
weights = _cat_sample_weights(y)
_acc = K.cast(K.equal(K.argmax(y, axis=-1),
K.argmax(z, axis=-1)),
K.floatx())
_acc = K.sum(_acc * weights) / K.sum(weights)
return _acc
|
def cat_acc(y, z)
|
Classification accuracy for multi-categorical case
| 2.508198
| 2.571926
| 0.975222
|
var_resid = K.var(y_true - y_pred)
var_y_true = K.var(y_true)
return 1 - var_resid / var_y_true
|
def var_explained(y_true, y_pred)
|
Fraction of variance explained.
| 2.75211
| 2.973107
| 0.925668
|
test_len(train)
y = train[1]
n_rows = y.shape[0]
if stratified:
if len(y.shape) > 1:
if y.shape[1] > 1:
raise ValueError("Can't use stratified K-fold with multi-column response variable")
else:
y = y[:, 0]
# http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold.split
return model_selection.StratifiedKFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\
.split(X=np.zeros((n_rows, 1)), y=y)
else:
return model_selection.KFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\
.split(X=np.zeros((n_rows, 1)))
|
def split_KFold_idx(train, cv_n_folds=5, stratified=False, random_state=None)
|
Get k-fold indices generator
| 2.13153
| 2.066272
| 1.031582
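A usage sketch, assuming split_KFold_idx and its test_len helper are in scope:
import numpy as np
train = (np.zeros((10, 3)), np.arange(10).reshape((-1, 1)))
for train_idx, test_idx in split_KFold_idx(train, cv_n_folds=5):
    print(len(train_idx), len(test_idx))  # 8 2 for each of the 5 folds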
|
test_len(train)
y = train[1][idx]
# x split
if isinstance(train[0], (list, tuple)):
x = [x[idx] for x in train[0]]
elif isinstance(train[0], dict):
x = {k: v[idx] for k, v in train[0].items()}
elif isinstance(train[0], np.ndarray):
x = train[0][idx]
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
if keep_other:
return (x, y) + train[2:]
else:
return (x, y)
|
def subset(train, idx, keep_other=True)
|
Subset the `train=(x, y)` data tuple, each of the form:
- list, np.ndarray
- tuple, np.ndarray
- dictionary, np.ndarray
- np.ndarray, np.ndarray
# Note
In case there are other data present in the tuple:
`(x, y, other1, other2, ...)`, these get passed on as:
`(x_sub, y_sub, other1, other2)`
# Arguments
train: `(x,y, other1, other2, ...)` tuple of data
idx: indices to subset the data with
keep_other: bool; If True, the additional tuple elements `(other1, other2, ...)` are passed
together with `(x, y)` but don't get subsetted.
| 2.553924
| 2.592581
| 0.985089
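A short example, assuming subset and its test_len helper are in scope:
import numpy as np
x = np.arange(10).reshape((5, 2))
y = np.arange(5)
x_sub, y_sub = subset((x, y), np.array([0, 2]))
print(x_sub.tolist(), y_sub.tolist())  # [[0, 1], [4, 5]] [0, 2]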
|
if type(response) is str:
response = [response]
X_feat = np.array(dt[features], dtype="float32")
y = np.array(dt[response], dtype="float32")
X_seq = encodeDNA(seq_vec=dt[sequence],
maxlen=trim_seq_len,
seq_align=seq_align)
X_seq = np.array(X_seq, dtype="float32")
id_vec = np.array(dt[id_column])
return X_feat, X_seq, y, id_vec
|
def prepare_data(dt, features, response, sequence, id_column=None, seq_align="end", trim_seq_len=None)
|
Prepare data for Concise.train or ConciseCV.train.
Args:
dt: A pandas DataFrame containing all the required data.
features (List of strings): Column names of `dt` used to produce the features design matrix. These columns should be numeric.
response (str or list of strings): Name(s) of column(s) used as a response variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
id_column (str): Name of the column used as the row identifier.
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
standardize_features (bool): If True, columns in the returned matrix :py:attr:`X_feat` are normalized to have zero mean and unit variance.
Returns:
tuple: Tuple with elements: :code:`(X_feat, X_seq, y, id_vec)`, where:
- :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`, where N is :code:`len(dt)` and :code:`D = len(features)`
- :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`. It represents 1-hot encoding of the DNA/RNA sequence.
- :py:attr:`y`: Response variable 1-column matrix of shape :code:`(N, 1)`
- :py:attr:`id_vec`: 1D Character array of shape :code:`(N)`. It represents the ID's of individual rows.
Note:
One-hot encoding of the DNA/RNA sequence is the following:
.. code:: python
{
"A": np.array([1, 0, 0, 0]),
"C": np.array([0, 1, 0, 0]),
"G": np.array([0, 0, 1, 0]),
"T": np.array([0, 0, 0, 1]),
"U": np.array([0, 0, 0, 1]),
"N": np.array([0, 0, 0, 0]),
}
| 2.448302
| 2.328122
| 1.051621
|
x = np.copy(x)
if minval is not None:
x[x < minval] = minval
if maxval is not None:
x[x > maxval] = maxval
return x
|
def _trunc(x, minval=None, maxval=None)
|
Truncate vector values to lie in the range [minval, maxval]
| 1.647238
| 1.500808
| 1.097568
|
# TODO - make it general...
if len(x.shape) == 1:
x = x.reshape((-1, 1))
if start is None:
start = np.nanmin(x)
else:
if x.min() < start:
if warn:
print("WARNING, x.min() < start for some elements. Truncating them to start: x[x < start] = start")
x = _trunc(x, minval=start)
if end is None:
end = np.nanmax(x)
else:
if x.max() > end:
if warn:
print("WARNING, x.max() > end for some elements. Truncating them to end: x[x > end] = end")
x = _trunc(x, maxval=end)
bs = BSpline(start, end,
n_bases=n_bases,
spline_order=spline_order
)
# concatenate x to long
assert len(x.shape) == 2
n_rows = x.shape[0]
n_cols = x.shape[1]
x_long = x.reshape((-1,))
x_feat = bs.predict(x_long, add_intercept=False) # shape = (n_rows * n_cols, n_bases)
x_final = x_feat.reshape((n_rows, n_cols, n_bases))
return x_final
|
def encodeSplines(x, n_bases=10, spline_order=3, start=None, end=None, warn=True)
|
**Deprecated**. Function version of the transformer class `EncodeSplines`.
Get B-spline base-function expansion
# Details
First, the knots for B-spline basis functions are placed
equidistantly on the [start, end] range.
(inferred from the data if None). Next, the b_n(x) value
is computed for each x and each n (spline-index) with
`scipy.interpolate.splev`.
# Arguments
x: a numpy array of positions with 2 dimensions
n_bases int: Number of spline bases.
spline_order: 2 for quadratic, 3 for cubic splines
start, end: range of values. If None, they are inferred from the data
as minimum and maximum value.
warn: Show warnings.
# Returns
`np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)`
| 2.539597
| 2.574925
| 0.98628
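A shape sketch, assuming encodeSplines and its BSpline helpers above are in scope:
import numpy as np
x = np.linspace(0, 100, 50).reshape((-1, 1))  # 50 positions, 1 column
print(encodeSplines(x, n_bases=10).shape)  # (50, 1, 10)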
|
assert x.ndim > 1
self.data_min_ = np.min(x, axis=tuple(range(x.ndim - 1)))
self.data_max_ = np.max(x, axis=tuple(range(x.ndim - 1)))
if self.share_knots:
self.data_min_[:] = np.min(self.data_min_)
self.data_max_[:] = np.max(self.data_max_)
|
def fit(self, x)
|
Calculate the knot placement from the values ranges.
# Arguments
x: numpy array, either N x D or N x L x D dimensional.
| 2.389806
| 2.428111
| 0.984224
|
# 1. split across last dimension
# 2. re-use ranges
# 3. Merge
array_list = [encodeSplines(x[..., i].reshape((-1, 1)),
n_bases=self.n_bases,
spline_order=self.degree,
warn=warn,
start=self.data_min_[i],
end=self.data_max_[i]).reshape(x[..., i].shape + (self.n_bases,))
for i in range(x.shape[-1])]
return np.stack(array_list, axis=-2)
|
def transform(self, x, warn=True)
|
Obtain the transformed values
| 5.46399
| 5.432024
| 1.005885
|
if ignore_stop_codons:
vocab = CODONS
else:
vocab = CODONS + STOP_CODONS
assert seq_length % 3 == 0
# use integer division so the Keras shape stays an int under Python 3
return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
|
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs)
|
Input placeholder for array returned by `encodeCodon`
Note: The seq_length is divided by 3
Wrapper for: `keras.layers.Input((seq_length / 3, 61 or 64), name=name, **kwargs)`
| 3.21322
| 2.842201
| 1.130539
|
return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
|
def InputAA(seq_length, name=None, **kwargs)
|
Input placeholder for array returned by `encodeAA`
Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
| 4.39779
| 4.639793
| 0.947842
|