Dataset schema: _id (string, length 2-7), title (string, length 1-88), partition (string, 3 classes), text (string, length 75-19.8k), language (string, 1 class), meta_information (dict).
q7000
Logger.subscribe
train
def subscribe(self, queue=None, *levels):
    """
    Subscribe to the aggregated log stream. On subscribe, a ledis queue will be
    fed with the logs of all running processes.

    Always use the queue name returned by this method, even if you specified
    the queue name to use.

    Note: it is legal to subscribe to the same queue, but it is bad practice for
    two processes to read from the same queue.

    :param queue: your unique queue name; otherwise, one will be generated for you
    :param levels: log levels to subscribe to
    :return: queue name to pull from
    """
    args = {
        'queue': queue,
        'levels': list(levels),
    }
    self._subscribe_chk.check(args)

    return self._client.json('logger.subscribe', args)
python
{ "resource": "" }
q7001
AggregatorManager.query
train
def query(self, key=None, **tags):
    """
    Query the zero-os aggregator for the current state object of monitored metrics.

    Note: the ID is returned as part of the key (if set) to avoid conflicts between
    similar metrics that have the same key. For example, a CPU core number can be
    the ID associated with 'machine.CPU.percent', so we can return the values for
    all core numbers in the same dict. You can filter on the ID as a tag.

    :example: self.query(key=key, id=value)

    :param key: metric key (ex: machine.memory.ram.available)
    :param tags: optional tags filter
    :return: dict of { 'key[/id]': state object }
    """
    args = {
        'key': key,
        'tags': tags,
    }
    self._query_chk.check(args)

    return self._client.json('aggregator.query', args)
python
{ "resource": "" }
q7002
CGroupManager.ensure
train
def ensure(self, subsystem, name):
    """
    Creates a cgroup if it doesn't exist under the specified subsystem
    with the given name.

    :param subsystem: the cgroup subsystem (currently supported: 'memory' and 'cpuset')
    :param name: name of the cgroup to create
    """
    args = {
        'subsystem': subsystem,
        'name': name,
    }
    self._cgroup_chk.check(args)

    return self._client.json('cgroup.ensure', args)
python
{ "resource": "" }
q7003
dlmk
train
def dlmk(l, m, k, theta1):
    """
    Returns the value of d^l_mk as defined in Allen & Ottewill 1997.
    Called by Dlmk.
    """
    if m >= k:
        factor = sqrt(factorial(l - k) * factorial(l + m) /
                      factorial(l + k) / factorial(l - m))
        part2 = (cos(theta1 / 2))**(2 * l + k - m) * (-sin(theta1 / 2))**(m - k) / factorial(m - k)
        part3 = sp.hyp2f1(m - l, -k - l, m - k + 1, -(tan(theta1 / 2))**2)
        return factor * part2 * part3
    else:
        return (-1)**(m - k) * dlmk(l, k, m, theta1)
python
{ "resource": "" }
q7004
Dlmk
train
def Dlmk(l, m, k, phi1, phi2, theta1, theta2):
    """
    Returns the value of D^l_mk as defined in Allen & Ottewill 1997.
    """
    return exp(complex(0., -m * phi1)) * dlmk(l, m, k, theta1) * \
        exp(complex(0., -k * gamma(phi1, phi2, theta1, theta2)))
python
{ "resource": "" }
q7005
gamma
train
def gamma(phi1, phi2, theta1, theta2):
    """
    Calculate the third rotation angle.
    Inputs are the sky angles of 2 pulsars; returns the angle.
    """
    if phi1 == phi2 and theta1 == theta2:
        gamma = 0
    else:
        gamma = atan(sin(theta2) * sin(phi2 - phi1) /
                     (cos(theta1) * sin(theta2) * cos(phi1 - phi2) -
                      sin(theta1) * cos(theta2)))

    dummy_arg = (cos(gamma) * cos(theta1) * sin(theta2) * cos(phi1 - phi2) +
                 sin(gamma) * sin(theta2) * sin(phi2 - phi1) -
                 cos(gamma) * sin(theta1) * cos(theta2))

    if dummy_arg >= 0:
        return gamma
    else:
        return pi + gamma
python
{ "resource": "" }
q7006
rotated_Gamma_ml
train
def rotated_Gamma_ml(m, l, phi1, phi2, theta1, theta2, gamma_ml):
    """
    This function takes any gamma in the computational frame and rotates it to
    the cosmic frame.
    """
    rotated_gamma = 0
    for ii in range(2 * l + 1):
        rotated_gamma += Dlmk(l, m, ii - l, phi1, phi2, theta1, theta2).conjugate() * gamma_ml[ii]

    return rotated_gamma
python
{ "resource": "" }
q7007
real_rotated_Gammas
train
def real_rotated_Gammas(m, l, phi1, phi2, theta1, theta2, gamma_ml):
    """
    This function returns the real-valued form of the Overlap Reduction Functions;
    see Eq. 47 of Mingarelli et al. 2013.
    """
    if m > 0:
        ans = (1. / sqrt(2)) * (rotated_Gamma_ml(m, l, phi1, phi2, theta1, theta2, gamma_ml) +
                                (-1)**m * rotated_Gamma_ml(-m, l, phi1, phi2, theta1, theta2, gamma_ml))
        return ans.real
    if m == 0:
        return rotated_Gamma_ml(0, l, phi1, phi2, theta1, theta2, gamma_ml).real
    if m < 0:
        ans = (1. / sqrt(2) / complex(0., 1)) * (rotated_Gamma_ml(-m, l, phi1, phi2, theta1, theta2, gamma_ml) -
                                                 (-1)**m * rotated_Gamma_ml(m, l, phi1, phi2, theta1, theta2, gamma_ml))
        return ans.real
python
{ "resource": "" }
q7008
chisq
train
def chisq(psr, formbats=False):
    """Return the total chisq for the current timing solution, removing the
    noise-averaged mean residual, and ignoring deleted points."""
    if formbats:
        psr.formbats()

    res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
    res -= numpy.sum(res / err**2) / numpy.sum(1 / err**2)

    return numpy.sum(res * res / (1e-12 * err * err))
python
{ "resource": "" }
q7009
dchisq
train
def dchisq(psr, formbats=False, renormalize=True):
    """Return the gradient of the total chisq for the current timing solution,
    after removing the noise-averaged mean residual, and ignoring deleted points."""
    if formbats:
        psr.formbats()

    res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
    res -= numpy.sum(res / err**2) / numpy.sum(1 / err**2)

    # bats already updated by residuals(); skip constant-phase column
    M = psr.designmatrix(updatebats=False, fixunits=True, fixsigns=True)[psr.deleted == 0, 1:]

    # renormalize design-matrix columns
    if renormalize:
        norm = numpy.sqrt(numpy.sum(M**2, axis=0))
        M /= norm
    else:
        norm = 1.0

    # compute chisq derivative, de-renormalize
    dr = -2 * numpy.dot(M.T, res / (1e-12 * err**2)) * norm

    return dr
python
{ "resource": "" }
q7010
create_fourier_design_matrix
train
def create_fourier_design_matrix(t, nmodes, freq=False, Tspan=None,
                                 logf=False, fmin=None, fmax=None):
    """
    Construct a Fourier design matrix from Eq. 11 of Lentati et al. 2013.

    :param t: vector of time series in seconds
    :param nmodes: number of Fourier coefficients to use
    :param freq: option to output frequencies
    :param Tspan: option to use a different Tspan
    :param logf: use log frequency spacing
    :param fmin: lower sampling frequency
    :param fmax: upper sampling frequency

    :return: F: Fourier design matrix
    :return: f: sampling frequencies (if freq=True)
    """
    N = len(t)
    F = np.zeros((N, 2 * nmodes))

    if Tspan is not None:
        T = Tspan
    else:
        T = t.max() - t.min()

    # define sampling frequencies
    if fmin is not None and fmax is not None:
        f = np.linspace(fmin, fmax, nmodes)
    else:
        f = np.linspace(1 / T, nmodes / T, nmodes)
    if logf:
        f = np.logspace(np.log10(1 / T), np.log10(nmodes / T), nmodes)

    Ffreqs = np.zeros(2 * nmodes)
    Ffreqs[0::2] = f
    Ffreqs[1::2] = f

    F[:, ::2] = np.sin(2 * np.pi * t[:, None] * f[None, :])
    F[:, 1::2] = np.cos(2 * np.pi * t[:, None] * f[None, :])

    if freq:
        return F, Ffreqs
    else:
        return F
python
{ "resource": "" }
q7011
powerlaw
train
def powerlaw(f, log10_A=-16, gamma=5):
    """Power-law PSD.

    :param f: sampling frequencies
    :param log10_A: log10 of red noise amplitude [GW units]
    :param gamma: spectral index of red noise process
    """
    fyr = 1 / 3.16e7
    return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma - 3) * f**(-gamma)
python
{ "resource": "" }
q7012
add_gwb
train
def add_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5,
            gwAmp=1e-20, alpha=-0.66, logspacing=True):
    """Add a stochastic background from inspiraling binaries, using the tempo2
    code that underlies the GWbkgrd plugin.

    Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries;
    'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number generator;
    'flow' and 'fhigh' [Hz] determine the background band; 'gwAmp' and 'alpha'
    determine its amplitude and exponent; and setting 'logspacing' to False will
    use linear spacing for the individual sources.

    It is also possible to create a background object with

        gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing)

    then call the method gwb.add_gwb(pulsar[i], dist) repeatedly to get a
    consistent background for multiple pulsars.

    Returns the GWB object.
    """
    gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing)
    gwb.add_gwb(psr, dist)

    return gwb
python
{ "resource": "" }
q7013
add_dipole_gwb
train
def add_dipole_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5,
                   gwAmp=1e-20, alpha=-0.66, logspacing=True,
                   dipoleamps=None, dipoledir=None, dipolemag=None):
    """Add a stochastic background from inspiraling binaries distributed
    according to a pure dipole distribution, using the tempo2 code that
    underlies the GWdipolebkgrd plugin.

    The basic use is identical to that of 'add_gwb': 'dist' is the pulsar
    distance [in kpc]; 'ngw' is the number of binaries; 'seed' (a negative
    integer) reseeds the GWbkgrd pseudorandom-number generator; 'flow' and
    'fhigh' [Hz] determine the background band; 'gwAmp' and 'alpha' determine
    its amplitude and exponent; and setting 'logspacing' to False will use
    linear spacing for the individual sources.

    Additionally, the dipole component can be specified by using one of two
    methods:

    1) Specify the dipole direction as three dipole amplitudes, in the vector
       dipoleamps.
    2) Specify the direction of the dipole as a magnitude dipolemag, and a
       vector dipoledir=[dipolephi, dipoletheta].

    It is also possible to create a background object with

        gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing)

    then call the method gwb.add_gwb(pulsar[i], dist) repeatedly to get a
    consistent background for multiple pulsars.

    Returns the GWB object.
    """
    gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing,
              dipoleamps, dipoledir, dipolemag)
    gwb.add_gwb(psr, dist)

    return gwb
python
{ "resource": "" }
q7014
add_efac
train
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None):
    """Add nominal TOA errors, multiplied by the `efac` factor.
    Optionally take a pseudorandom-number-generator seed."""
    if seed is not None:
        N.random.seed(seed)

    # default efacvec
    efacvec = N.ones(psr.nobs)

    # check that efac is scalar if flags is None
    if flags is None:
        if not N.isscalar(efac):
            raise ValueError('ERROR: If flags is None, efac must be a scalar')
        else:
            efacvec = N.ones(psr.nobs) * efac

    if flags is not None and flagid is not None and not N.isscalar(efac):
        if len(efac) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(psr.flagvals(flagid))
                efacvec[ind] = efac[ct]

    psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
python
{ "resource": "" }
q7015
extrap1d
train
def extrap1d(interpolator):
    """
    Function to extend an interpolation function to an extrapolation function.

    :param interpolator: scipy interp1d object

    :returns ufunclike: extension of function to extrapolation
    """
    xs = interpolator.x
    ys = interpolator.y

    def pointwise(x):
        if x < xs[0]:
            return ys[0]  # +(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
        elif x > xs[-1]:
            return ys[-1]  # +(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
        else:
            return interpolator(x)

    def ufunclike(xs):
        # list() makes this work on Python 3, where map returns an iterator
        return N.array(list(map(pointwise, N.array(xs))))

    return ufunclike
python
{ "resource": "" }
q7016
computeORFMatrix
train
def computeORFMatrix(psr):
    """
    Compute the ORF matrix.

    :param psr: list of pulsar object instances

    :returns: matrix that has the ORF values for every pulsar pair,
              with 2 on the diagonal to account for the pulsar term
    """
    # begin loop over all pulsar pairs and calculate ORF
    npsr = len(psr)
    ORF = N.zeros((npsr, npsr))
    phati = N.zeros(3)
    phatj = N.zeros(3)
    ptheta = [N.pi / 2 - p['DECJ'].val for p in psr]
    pphi = [p['RAJ'].val for p in psr]
    for ll in range(0, npsr):
        phati[0] = N.cos(pphi[ll]) * N.sin(ptheta[ll])
        phati[1] = N.sin(pphi[ll]) * N.sin(ptheta[ll])
        phati[2] = N.cos(ptheta[ll])

        for kk in range(0, npsr):
            phatj[0] = N.cos(pphi[kk]) * N.sin(ptheta[kk])
            phatj[1] = N.sin(pphi[kk]) * N.sin(ptheta[kk])
            phatj[2] = N.cos(ptheta[kk])

            if ll != kk:
                xip = (1. - N.sum(phati * phatj)) / 2.
                ORF[ll, kk] = 3. * (1. / 3. + xip * (N.log(xip) - 1. / 6.))
            else:
                ORF[ll, kk] = 2.0

    return ORF
python
{ "resource": "" }
q7017
plotres
train
def plotres(psr, deleted=False, group=None, **kwargs):
    """Plot residuals, compute unweighted rms residual."""
    res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs

    if (not deleted) and N.any(psr.deleted != 0):
        res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
        print("Plotting {0}/{1} nondeleted points.".format(len(res), psr.nobs))

    meanres = math.sqrt(N.mean(res**2)) / 1e-6

    if group is None:
        i = N.argsort(t)
        P.errorbar(t[i], res[i] / 1e-6, yerr=errs[i], fmt='x', **kwargs)
    else:
        if (not deleted) and N.any(psr.deleted):
            flagmask = psr.flagvals(group)[~psr.deleted]
        else:
            flagmask = psr.flagvals(group)

        unique = list(set(flagmask))

        for flagval in unique:
            f = (flagmask == flagval)
            flagres, flagt, flagerrs = res[f], t[f], errs[f]
            i = N.argsort(flagt)
            P.errorbar(flagt[i], flagres[i] / 1e-6, yerr=flagerrs[i], fmt='x', **kwargs)

        P.legend(unique, numpoints=1, bbox_to_anchor=(1.1, 1.1))

    P.xlabel('MJD')
    P.ylabel('res [us]')
    P.title("{0} - rms res = {1:.2f} us".format(psr.name, meanres))
python
{ "resource": "" }
q7018
plotgwsrc
train
def plotgwsrc(gwb):
    """
    Plot a GWB source population as a mollweide projection.
    """
    theta, phi, omega, polarization = gwb.gw_dist()

    rho = phi - N.pi
    eta = 0.5 * N.pi - theta

    # I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
    # /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
    # RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
    # old_settings = N.seterr(invalid='ignore')

    P.title("GWB source population")
    ax = P.axes(projection='mollweide')

    foo = P.scatter(rho, eta, marker='.', s=1)

    # bar = N.seterr(**old_settings)

    return foo
python
{ "resource": "" }
q7019
merge
train
def merge(data, skip=50, fraction=1.0):
    """Merge one of every 'skip' clouds into a single emcee population,
    using the later 'fraction' of the run."""
    w, s, d = data.chains.shape

    start = int((1.0 - fraction) * s)
    total = int((s - start) / skip)

    return data.chains[:, start::skip, :].reshape((w * total, d))
python
{ "resource": "" }
q7020
cull
train
def cull(data, index, min=None, max=None):
    """Sieve an emcee cloud by excluding walkers with search variable 'index'
    smaller than 'min' or larger than 'max'."""
    ret = data

    if min is not None:
        ret = ret[ret[:, index] > min, :]

    if max is not None:
        ret = ret[ret[:, index] < max, :]

    return ret
python
{ "resource": "" }
q7021
make_ecc_interpolant
train
def make_ecc_interpolant():
    """
    Make an interpolation function from the eccentricity file to determine
    the number of harmonics to use for a given eccentricity.

    :returns: interpolant
    """
    pth = resource_filename(Requirement.parse('libstempo'),
                            'libstempo/ecc_vs_nharm.txt')
    fil = np.loadtxt(pth)

    return interp1d(fil[:, 0], fil[:, 1])
python
{ "resource": "" }
q7022
best_kmers
train
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1,
               seq_align="start", trim_seq_len=None):
    """
    Find the best k-mers for CONCISE initialization.

    Args:
        dt (pd.DataFrame): Table containing the response variable and sequence.
        response (str): Name of the column used as the response variable.
        sequence (str): Name of the column storing the DNA/RNA sequences.
        k (int): Desired k-mer length.
        n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
        consider_shift (boolean): When performing stepwise k-mer selection,
            is TATTTA similar to ATTTAG?
        seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
        trim_seq_len (int): Consider only the first `trim_seq_len` bases of each sequence
            when generating the sequence design matrix. If :python:`None`, set
            :py:attr:`trim_seq_len` to the longest sequence length, hence whole
            sequences are considered.

    Returns:
        string list: Best set of motifs for this dataset, sorted with respect to
        confidence (best candidate occurring first).

    Details:
        First a lasso model gets fitted to get a set of initial motifs. Next, the
        best subset of unrelated motifs is selected by stepwise selection.
    """
    y = dt[response]
    seq = dt[sequence]
    if trim_seq_len is not None:
        seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
        seq = [s.replace("N", "") for s in seq]
    dt_kmer = kmer_count(seq, k)
    Xsp = csc_matrix(dt_kmer)
    en = ElasticNet(alpha=1, standardize=False, n_splits=3)
    en.fit(Xsp, y)

    # which coefficients are nonzero?
    nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist()

    # perform stepwise selection
    #
    # TODO - how do we deal with the intercept?
    # largest number of motifs where they don't differ by more than 1 k-mer

    def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):
        """
        Perform stepwise model selection while preventing the addition of motifs
        similar to the already selected ones.
        """
        F, pval = f_regression(dt_kmer[to_be_selected_kmers], y)
        kmer = to_be_selected_kmers.pop(pval.argmin())
        selected_kmers.append(kmer)

        def select_criterion(s1, s2, consider_shift=True):
            if hamming_distance(s1, s2) <= 1:
                return False
            if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
                return False
            if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
                return False
            return True

        to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers
                                if select_criterion(ckmer, kmer, consider_shift)]
        if len(to_be_selected_kmers) == 0:
            return selected_kmers
        else:
            # regress out the new feature
            lm = LinearRegression()
            lm.fit(dt_kmer[selected_kmers], y)
            y_new = y - lm.predict(dt_kmer[selected_kmers])
            return find_next_best(dt_kmer, y_new, selected_kmers,
                                  to_be_selected_kmers, consider_shift)

    selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)
    return selected_kmers
python
{ "resource": "" }
q7023
kmer_count
train
def kmer_count(seq_list, k):
    """
    Generate k-mer counts from a set of sequences.

    Args:
        seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})
        k (int): K in k-mer.

    Returns:
        pandas.DataFrame: Count matrix for each sequence in seq_list

    Example:
        >>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
           AA  AC  AG  AT  CA  CC  CG  CT  GA  GC  GG  GT  TA  TC  TG  TT
        0   0   1   0   1   0   0   1   0   0   0   0   1   1   0   0   1
        1   0   1   0   0   0   0   2   0   2   1   0   0   0   0   0   0
    """
    # generate all k-mers
    all_kmers = generate_all_kmers(k)

    kmer_count_list = []
    for seq in seq_list:
        kmer_count_list.append([seq.count(kmer) for kmer in all_kmers])

    return pd.DataFrame(kmer_count_list, columns=all_kmers)
python
{ "resource": "" }
q7024
generate_all_kmers
train
def generate_all_kmers(k):
    """
    Generate all possible k-mers.

    Example:
        >>> generate_all_kmers(2)
        ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
    """
    bases = ['A', 'C', 'G', 'T']
    return [''.join(p) for p in itertools.product(bases, repeat=k)]
python
{ "resource": "" }
q7025
dict_to_numpy_dict
train
def dict_to_numpy_dict(obj_dict):
    """
    Convert a dictionary of lists into a dictionary of numpy arrays.
    """
    return {key: np.asarray(value) if value is not None else None
            for key, value in obj_dict.items()}
python
{ "resource": "" }
q7026
rec_dict_to_numpy_dict
train
def rec_dict_to_numpy_dict(obj_dict):
    """
    Same as dict_to_numpy_dict, but recursive.
    """
    if type(obj_dict) == dict:
        return {key: rec_dict_to_numpy_dict(value) if value is not None else None
                for key, value in obj_dict.items()}
    elif obj_dict is None:
        return None
    else:
        return np.asarray(obj_dict)
python
{ "resource": "" }
q7027
compare_numpy_dict
train
def compare_numpy_dict(a, b, exact=True):
    """
    Compare two recursive numpy dictionaries.
    """
    if type(a) != type(b) and type(a) != np.ndarray and type(b) != np.ndarray:
        return False

    # go through a dictionary
    if type(a) == dict and type(b) == dict:
        if not a.keys() == b.keys():
            return False
        for key in a.keys():
            res = compare_numpy_dict(a[key], b[key], exact)
            if res is False:
                print("false for key = ", key)
                return False
        return True

    # if type(a) == np.ndarray and type(b) == np.ndarray:
    if type(a) == np.ndarray or type(b) == np.ndarray:
        if exact:
            return (a == b).all()
        else:
            # assert_almost_equal raises on mismatch and returns None on success,
            # so return True explicitly when it passes
            np.testing.assert_almost_equal(a, b)
            return True

    if a is None and b is None:
        return True

    raise NotImplementedError
python
{ "resource": "" }
q7028
BSpline.getS
train
def getS(self, add_intercept=False):
    """Get the penalty matrix S.

    Returns:
        np.array of shape (n_bases + add_intercept, n_bases + add_intercept)
    """
    S = self.S
    if add_intercept is True:
        # S <- cbind(0, rbind(0, S)) # in R
        zeros = np.zeros_like(S[:1, :])
        S = np.vstack([zeros, S])

        zeros = np.zeros_like(S[:, :1])
        S = np.hstack([zeros, S])
    return S
python
{ "resource": "" }
q7029
get_pwm_list
train
def get_pwm_list(motif_name_list, pseudocountProb=0.0001):
    """Get a list of ENCODE PWM's.

    # Arguments
        motif_name_list: List of id's from the `PWM_id` column in the `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = _load_motifs()
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list]
    return pwm_list
python
{ "resource": "" }
q7030
auc
train
def auc(y_true, y_pred, round=True):
    """Area under the ROC curve.
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)

    if round:
        y_true = y_true.round()
    if len(y_true) == 0 or len(np.unique(y_true)) < 2:
        return np.nan
    return skm.roc_auc_score(y_true, y_pred)
python
{ "resource": "" }
q7031
recall_at_precision
train
def recall_at_precision(y_true, y_pred, precision):
    """Recall at a certain precision threshold.

    Args:
        y_true: true labels
        y_pred: predicted labels
        precision: desired precision level at which to compute the recall
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    # don't shadow the `precision` argument with the curve values
    prec_curve, recall_curve, _ = skm.precision_recall_curve(y_true, y_pred)
    # pick the first point at which the (approximately non-decreasing)
    # precision curve reaches the desired precision
    return recall_curve[np.searchsorted(prec_curve - precision, 0)]
python
{ "resource": "" }
q7032
cor
train
def cor(y_true, y_pred):
    """Compute the Pearson correlation coefficient.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.corrcoef(y_true, y_pred)[0, 1]
python
{ "resource": "" }
q7033
kendall
train
def kendall(y_true, y_pred, nb_sample=100000):
    """Kendall's tau coefficient (Kendall rank correlation coefficient).
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    if len(y_true) > nb_sample:
        idx = np.arange(len(y_true))
        np.random.shuffle(idx)
        idx = idx[:nb_sample]
        y_true = y_true[idx]
        y_pred = y_pred[idx]
    return kendalltau(y_true, y_pred)[0]
python
{ "resource": "" }
q7034
mad
train
def mad(y_true, y_pred):
    """Mean absolute deviation.
    """
    # note: despite the name, this computes the mean (not median) absolute deviation
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.mean(np.abs(y_true - y_pred))
python
{ "resource": "" }
q7035
mse
train
def mse(y_true, y_pred):
    """Mean squared error.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return ((y_true - y_pred) ** 2).mean(axis=None)
python
{ "resource": "" }
q7036
sample_params
train
def sample_params(params):
    """Randomly sample hyper-parameters stored in a dictionary on a predefined
    range and scale. Useful for hyper-parameter random search.

    Args:
        params (dict): hyper-parameters to sample. Dictionary value-type parsing:

            - :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3, 1e7)`
            - :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1, 10)`
            - :python:`{1, 2}` - sample from a **set** of values
            - :python:`1` - don't sample

    Returns:
        dict: Dictionary with the same keys as :py:attr:`params`, but with only
        one element as the value.

    Examples:
        >>> myparams = {
            "max_pool": True,  # always use True
            "step_size": [0.09, 0.005],
            "step_decay": (0.9, 1),
            "n_splines": {10, None},  # use either 10 or None
            "some_tuple": {(1, 2), (1)},
        }
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)}
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)}
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)}

    Note:
        - :python:`{[1, 2], [3, 4]}` is invalid. Use :python:`{(1, 2), (3, 4)}` instead.
        - You can always use :python:`{}` with a single element to bypass sampling.
    """
    def sample_log(myrange):
        x = np.random.uniform(np.log10(myrange[0]), np.log10(myrange[1]))
        return 10**x

    def sample_unif(myrange):
        x = np.random.uniform(myrange[0], myrange[1])
        return x

    def sample_set(myset):
        x = random.sample(myset, 1)
        return x[0]

    def type_dep_sample(myrange):
        if type(myrange) is list:
            return sample_log(myrange)
        if type(myrange) is tuple:
            return sample_unif(myrange)
        if type(myrange) is set:
            return sample_set(myrange)
        return myrange

    return {k: type_dep_sample(v) for k, v in params.items()}
python
{ "resource": "" }
q7037
cat_acc
train
def cat_acc(y, z):
    """Classification accuracy for the multi-categorical case.
    """
    weights = _cat_sample_weights(y)
    _acc = K.cast(K.equal(K.argmax(y, axis=-1),
                          K.argmax(z, axis=-1)),
                  K.floatx())
    _acc = K.sum(_acc * weights) / K.sum(weights)
    return _acc
python
{ "resource": "" }
q7038
split_KFold_idx
train
def split_KFold_idx(train, cv_n_folds=5, stratified=False, random_state=None):
    """Get a k-fold indices generator.
    """
    test_len(train)
    y = train[1]
    n_rows = y.shape[0]
    if stratified:
        if len(y.shape) > 1:
            if y.shape[1] > 1:
                raise ValueError("Can't use stratified K-fold with a multi-column response variable")
            else:
                y = y[:, 0]
        # http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold.split
        return model_selection.StratifiedKFold(n_splits=cv_n_folds,
                                               shuffle=True,
                                               random_state=random_state)\
            .split(X=np.zeros((n_rows, 1)), y=y)
    else:
        return model_selection.KFold(n_splits=cv_n_folds,
                                     shuffle=True,
                                     random_state=random_state)\
            .split(X=np.zeros((n_rows, 1)))
python
{ "resource": "" }
q7039
prepare_data
train
def prepare_data(dt, features, response, sequence, id_column=None,
                 seq_align="end", trim_seq_len=None):
    """
    Prepare data for Concise.train or ConciseCV.train.

    Args:
        dt: A pandas DataFrame containing all the required data.
        features (list of strings): Column names of `dt` used to produce the features
            design matrix. These columns should be numeric.
        response (str or list of strings): Name(s) of column(s) used as a response variable.
        sequence (str): Name of the column storing the DNA/RNA sequences.
        id_column (str): Name of the column used as the row identifier.
        seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
        trim_seq_len (int): Consider only the first `trim_seq_len` bases of each sequence
            when generating the sequence design matrix. If :python:`None`, set
            :py:attr:`trim_seq_len` to the longest sequence length, hence whole
            sequences are considered.

    Returns:
        tuple: Tuple with elements :code:`(X_feat, X_seq, y, id_vec)`, where:

            - :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`,
              where N is :code:`len(dt)` and :code:`D = len(features)`
            - :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`.
              It represents the 1-hot encoding of the DNA/RNA sequence.
            - :py:attr:`y`: response variable 1-column matrix of shape :code:`(N, 1)`
            - :py:attr:`id_vec`: 1D character array of shape :code:`(N)`. It represents
              the IDs of individual rows.

    Note:
        One-hot encoding of the DNA/RNA sequence is the following:

        .. code:: python

            {
                "A": np.array([1, 0, 0, 0]),
                "C": np.array([0, 1, 0, 0]),
                "G": np.array([0, 0, 1, 0]),
                "T": np.array([0, 0, 0, 1]),
                "U": np.array([0, 0, 0, 1]),
                "N": np.array([0, 0, 0, 0]),
            }
    """
    if type(response) is str:
        response = [response]

    X_feat = np.array(dt[features], dtype="float32")
    y = np.array(dt[response], dtype="float32")

    X_seq = encodeDNA(seq_vec=dt[sequence],
                      maxlen=trim_seq_len,
                      seq_align=seq_align)
    X_seq = np.array(X_seq, dtype="float32")
    id_vec = np.array(dt[id_column])

    return X_feat, X_seq, y, id_vec
python
{ "resource": "" }
q7040
EncodeSplines.fit
train
def fit(self, x):
    """Calculate the knot placement from the value ranges.

    # Arguments
        x: numpy array, either N x D or N x L x D dimensional.
    """
    assert x.ndim > 1
    self.data_min_ = np.min(x, axis=tuple(range(x.ndim - 1)))
    self.data_max_ = np.max(x, axis=tuple(range(x.ndim - 1)))

    if self.share_knots:
        self.data_min_[:] = np.min(self.data_min_)
        self.data_max_[:] = np.max(self.data_max_)
python
{ "resource": "" }
q7041
EncodeSplines.transform
train
def transform(self, x, warn=True):
    """Obtain the transformed values.
    """
    # 1. split across the last dimension
    # 2. re-use the fitted ranges
    # 3. merge
    array_list = [encodeSplines(x[..., i].reshape((-1, 1)),
                                n_bases=self.n_bases,
                                spline_order=self.degree,
                                warn=warn,
                                start=self.data_min_[i],
                                end=self.data_max_[i]).reshape(x[..., i].shape + (self.n_bases,))
                  for i in range(x.shape[-1])]
    return np.stack(array_list, axis=-2)
python
{ "resource": "" }
q7042
InputCodon
train
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs):
    """Input placeholder for the array returned by `encodeCodon`.

    Note: seq_length is divided by 3

    Wrapper for: `keras.layers.Input((seq_length / 3, 61 or 64), name=name, **kwargs)`
    """
    if ignore_stop_codons:
        vocab = CODONS
    else:
        vocab = CODONS + STOP_CODONS

    assert seq_length % 3 == 0
    # integer division so that the shape is an int
    return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
python
{ "resource": "" }
q7043
InputAA
train
def InputAA(seq_length, name=None, **kwargs):
    """Input placeholder for the array returned by `encodeAA`.

    Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
    """
    return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
python
{ "resource": "" }
q7044
InputRNAStructure
train
def InputRNAStructure(seq_length, name=None, **kwargs):
    """Input placeholder for the array returned by `encodeRNAStructure`.

    Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
    """
    return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
python
{ "resource": "" }
q7045
ConvSequence._plot_weights_heatmap
train
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs):
    """Plot weights as a heatmap.

    index = can be a particular index or a list of indices
    **kwargs - additional arguments to concise.utils.plot.heatmap
    """
    W = self.get_weights()[0]
    if index is None:
        index = np.arange(W.shape[2])

    fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ",
                  vocab=self.VOCAB, figsize=figsize, **kwargs)
    # plt.show()
    return fig
python
{ "resource": "" }
q7046
ConvSequence._plot_weights_motif
train
def _plot_weights_motif(self, index, plot_type="motif_raw",
                        background_probs=DEFAULT_BASE_BACKGROUND,
                        ncol=1,
                        figsize=None):
    """Index can only be a single int.
    """
    w_all = self.get_weights()
    if len(w_all) == 0:
        raise Exception("Layer needs to be initialized first")
    W = w_all[0]
    if index is None:
        index = np.arange(W.shape[2])

    if isinstance(index, int):
        index = [index]

    fig = plt.figure(figsize=figsize)

    if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS:
        arr = pssm_array2pwm_array(W, background_probs)
    elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS:
        arr = W
    elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS:
        quasi_pwm = pssm_array2pwm_array(W, background_probs)
        arr = _pwm2pwm_info(quasi_pwm)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))

    fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ")
    # fig.show()
    return fig
python
{ "resource": "" }
q7047
ConvSequence.plot_weights
train
def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs):
    """Plot filters as a heatmap or as motifs.

    index = can be a particular index or a list of indices
    **kwargs - additional arguments to concise.utils.plot.heatmap
    """
    if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap":
        return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs)
    elif plot_type[:5] == "motif":
        return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
python
{ "resource": "" }
q7048
_check_pwm_list
train
def _check_pwm_list(pwm_list):
    """Check the input validity.
    """
    for pwm in pwm_list:
        if not isinstance(pwm, PWM):
            raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm))
    return True
python
{ "resource": "" }
q7049
heatmap
train
def heatmap(w, vmin=None, vmax=None, diverge_color=False, ncol=1,
            plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)):
    """Plot a heatmap from the weight matrix w.

    vmin, vmax = z axis range
    diverge_color = should we use diverging colors?
    plot_name = plot title
    vocab = vocabulary (corresponds to the first axis)
    """
    # generate y and x values from the dimension lengths
    assert len(vocab) == w.shape[0]
    plt_y = np.arange(w.shape[0] + 1) + 0.5
    plt_x = np.arange(w.shape[1] + 1) - 0.5

    z_min = w.min()
    z_max = w.max()
    if vmin is None:
        vmin = z_min
    if vmax is None:
        vmax = z_max
    if diverge_color:
        color_map = plt.cm.RdBu
    else:
        color_map = plt.cm.Blues

    fig = plt.figure(figsize=figsize)

    # multiple axes
    if len(w.shape) == 3:
        n_plots = w.shape[2]
        nrow = math.ceil(n_plots / ncol)
    else:
        n_plots = 1
        nrow = 1
        ncol = 1

    for i in range(n_plots):
        if len(w.shape) == 3:
            w_cur = w[:, :, i]
        else:
            w_cur = w
        ax = plt.subplot(nrow, ncol, i + 1)
        plt.tight_layout()
        im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map,
                           vmin=vmin, vmax=vmax, edgecolors="white")
        ax.grid(False)
        ax.set_yticklabels([""] + vocab, minor=False)
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_xticks(np.arange(w_cur.shape[1] + 1))
        ax.set_xlim(plt_x.min(), plt_x.max())
        ax.set_ylim(plt_y.min(), plt_y.max())

        # nice scale location:
        # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        fig.colorbar(im, cax=cax)

        if plot_name is not None:
            if n_plots > 0:
                pln = plot_name + " {0}".format(i)
            else:
                pln = plot_name
            ax.set_title(pln)
        ax.set_aspect('equal')
    return fig
python
{ "resource": "" }
q7050
add_letter_to_axis
train
def add_letter_to_axis(ax, let, col, x, y, height):
    """Add 'let' with position x,y and height height to matplotlib axis 'ax'.
    """
    if len(let) == 2:
        colors = [col, "white"]
    elif len(let) == 1:
        colors = [col]
    else:
        raise ValueError("3 or more Polygons are not supported")

    for polygon, color in zip(let, colors):
        new_polygon = affinity.scale(
            polygon, yfact=height, origin=(0, 0, 0))
        new_polygon = affinity.translate(
            new_polygon, xoff=x, yoff=y)
        patch = PolygonPatch(
            new_polygon, edgecolor=color, facecolor=color)
        ax.add_patch(patch)
    return
python
{ "resource": "" }
q7051
seqlogo
train
def seqlogo(letter_heights, vocab="DNA", ax=None):
    """Make a logo plot.

    # Arguments
        letter_heights: "motif length" x "vocabulary size" numpy array.
            Can also contain negative values.
        vocab: str, vocabulary name. Can be: DNA, RNA, AA, RNAStruct.
        ax: matplotlib axis
    """
    ax = ax or plt.gca()

    assert letter_heights.shape[1] == len(VOCABS[vocab])
    x_range = [1, letter_heights.shape[0]]
    pos_heights = np.copy(letter_heights)
    pos_heights[letter_heights < 0] = 0
    neg_heights = np.copy(letter_heights)
    neg_heights[letter_heights > 0] = 0

    for x_pos, heights in enumerate(letter_heights):
        letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys())))
        y_pos_pos = 0.0
        y_neg_pos = 0.0
        for height, letter in letters_and_heights:
            color = VOCABS[vocab][letter]
            polygons = letter_polygons[letter]
            if height > 0:
                add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height)
                y_pos_pos += height
            else:
                add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height)
                y_neg_pos += height

    # if add_hline:
    #     ax.axhline(color="black", linewidth=1)
    ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
    ax.grid(False)
    ax.set_xticks(list(range(*x_range)) + [x_range[-1]])
    ax.set_aspect(aspect='auto', adjustable='box')
    ax.autoscale_view()
python
{ "resource": "" }
q7052
get_cv_accuracy
train
def get_cv_accuracy(res):
    """
    Extract the CV accuracy from the model.
    """
    ac_list = [(accuracy["train_acc_final"], accuracy["test_acc_final"])
               for accuracy, weights in res]

    ac = np.array(ac_list)
    perf = {
        "mean_train_acc": np.mean(ac[:, 0]),
        "std_train_acc": np.std(ac[:, 0]),
        "mean_test_acc": np.mean(ac[:, 1]),
        "std_test_acc": np.std(ac[:, 1]),
    }
    return perf
python
{ "resource": "" }
q7053
one_hot2string
train
def one_hot2string(arr, vocab):
    """Convert a one-hot encoded array back to string.
    """
    tokens = one_hot2token(arr)
    indexToLetter = _get_index_dict(vocab)

    return [''.join([indexToLetter[x] for x in row]) for row in tokens]
python
{ "resource": "" }
q7054
tokenize
train
def tokenize(seq, vocab, neutral_vocab=[]):
    """Convert a sequence to integers.

    # Arguments
        seq: Sequence to encode
        vocab: Vocabulary to use
        neutral_vocab: Neutral vocabulary -> assign those values to -1

    # Returns
        List of length `len(seq)` with integers from `-1` to `len(vocab) - 1`
    """
    # requirement: all vocabs have the same length
    if isinstance(neutral_vocab, str):
        neutral_vocab = [neutral_vocab]

    nchar = len(vocab[0])
    for l in vocab + neutral_vocab:
        assert len(l) == nchar
    assert len(seq) % nchar == 0  # since we are using striding

    vocab_dict = _get_vocab_dict(vocab)
    for l in neutral_vocab:
        vocab_dict[l] = -1

    # current performance bottleneck
    return [vocab_dict[seq[(i * nchar):((i + 1) * nchar)]] for i in range(len(seq) // nchar)]
python
{ "resource": "" }
q7055
encodeSequence
train
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None,
                   seq_align="start", pad_value="N", encode_type="one_hot"):
    """Convert a list of genetic sequences into a one-hot-encoded array.

    # Arguments
        seq_vec: list of strings (genetic sequences)
        vocab: list of chars: List of "words" to use as the vocabulary. Can be strings
            of length > 0, but all need to have the same length. For DNA, this is:
            ["A", "C", "G", "T"].
        neutral_vocab: list of chars: Values used to pad the sequence or represent
            unknown values. For DNA, this is: ["N"].
        maxlen: int or None. Should we trim (subset) the resulting sequence?
            If None, don't trim. Note that it trims wrt the align parameter.
            It should be smaller than the longest sequence.
        seq_align: character; 'end' or 'start'. To which end should we align sequences?
        encode_type: "one_hot" or "token". "token" represents each vocab element as a
            positive integer from 1 to len(vocab) + 1; neutral_vocab is represented with 0.

    # Returns
        Array with shape for encode_type:

        - "one_hot": `(len(seq_vec), maxlen, len(vocab))`
        - "token": `(len(seq_vec), maxlen)`

        If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`.
    """
    if isinstance(neutral_vocab, str):
        neutral_vocab = [neutral_vocab]
    if isinstance(seq_vec, str):
        raise ValueError("seq_vec should be an iterable returning " +
                         "strings not a string itself")
    assert len(vocab[0]) == len(pad_value)
    assert pad_value in neutral_vocab
    assert encode_type in ["one_hot", "token"]

    seq_vec = pad_sequences(seq_vec, maxlen=maxlen,
                            align=seq_align, value=pad_value)

    if encode_type == "one_hot":
        arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab))
                    for i, seq in enumerate(seq_vec)]
    elif encode_type == "token":
        arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab)) for seq in seq_vec]
        # we add 1 to be compatible with keras: https://keras.io/layers/embeddings/
        # indexes > 0, 0 = padding element

    return np.stack(arr_list)
python
{ "resource": "" }
q7056
encodeDNA
train
def encodeDNA(seq_vec, maxlen=None, seq_align="start"):
    """Convert a DNA sequence into a 1-hot-encoded numpy array.

    # Arguments
        seq_vec: list of chars. List of sequences that can have different lengths.
        maxlen: int or None. Should we trim (subset) the resulting sequence?
            If None, don't trim. Note that it trims wrt the align parameter.
            It should be smaller than the longest sequence.
        seq_align: character; 'end' or 'start'. To which end should we align sequences?

    # Returns
        3D numpy array of shape (len(seq_vec), trim_seq_len (or maximal sequence length if None), 4)

    # Example
        ```python
        >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA']
        >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8)
        >>> X_seq.shape
        (2, 8, 4)

        >>> print(X_seq)
        [[[0 0 0 1]
          [1 0 0 0]
          [0 1 0 0]
          [0 0 0 1]
          [0 1 0 0]
          [1 0 0 0]
          [0 0 1 0]
          [1 0 0 0]]

         [[0 0 0 0]
          [0 0 0 0]
          [0 0 0 1]
          [0 1 0 0]
          [0 0 0 1]
          [0 0 0 1]
          [0 0 0 1]
          [1 0 0 0]]]
        ```
    """
    return encodeSequence(seq_vec,
                          vocab=DNA,
                          neutral_vocab="N",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="N",
                          encode_type="one_hot")
python
{ "resource": "" }
q7057
encodeRNA
train
def encodeRNA(seq_vec, maxlen=None, seq_align="start"):
    """Convert an RNA sequence into a 1-hot-encoded numpy array, as for encodeDNA.
    """
    return encodeSequence(seq_vec,
                          vocab=RNA,
                          neutral_vocab="N",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="N",
                          encode_type="one_hot")
python
{ "resource": "" }
q7058
encodeCodon
train
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None,
                seq_align="start", encode_type="one_hot"):
    """Convert a codon sequence into a 1-hot-encoded numpy array.

    # Arguments
        seq_vec: List of strings/DNA sequences
        ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from the one-hot encoding.
        maxlen: Maximum sequence length. See `pad_sequences` for more detail.
        seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail.
        encode_type: can be `"one_hot"` or `"token"` for token encoding of codons (incremental integer).

    # Returns
        numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`
    """
    if ignore_stop_codons:
        vocab = CODONS
        neutral_vocab = STOP_CODONS + ["NNN"]
    else:
        vocab = CODONS + STOP_CODONS
        neutral_vocab = ["NNN"]

    # replace all U's with T's
    seq_vec = [str(seq).replace("U", "T") for seq in seq_vec]

    return encodeSequence(seq_vec,
                          vocab=vocab,
                          neutral_vocab=neutral_vocab,
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="NNN",
                          encode_type=encode_type)
python
{ "resource": "" }
q7059
encodeAA
train
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"):
    """Convert an amino-acid sequence into a 1-hot-encoded numpy array.

    # Arguments
        seq_vec: List of strings/amino-acid sequences
        maxlen: Maximum sequence length. See `pad_sequences` for more detail.
        seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail.
        encode_type: can be `"one_hot"` or `"token"` for token encoding (incremental integer).

    # Returns
        numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
    """
    return encodeSequence(seq_vec,
                          vocab=AMINO_ACIDS,
                          neutral_vocab="_",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="_",
                          encode_type=encode_type)
python
{ "resource": "" }
q7060
_validate_pos
train
def _validate_pos(df):
    """Validate the returned positional object.
    """
    assert isinstance(df, pd.DataFrame)
    assert ["seqname", "position", "strand"] == df.columns.tolist()
    assert df.position.dtype == np.dtype("int64")
    assert df.strand.dtype == np.dtype("O")
    assert df.seqname.dtype == np.dtype("O")
    return df
python
{ "resource": "" }
q7061
get_pwm_list
train
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001):
    """Get a list of Attract PWM's.

    # Arguments
        pwm_id_list: List of id's from the `PWM_id` column in the `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = load_motif_db(ATTRACT_PWM)
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(l[str(m)] + pseudocountProb, name=m) for m in pwm_id_list]
    return pwm_list
python
{ "resource": "" }
q7062
mask_loss
train
def mask_loss(loss, mask_value=MASK_VALUE):
    """Generate a new loss function that ignores values where `y_true == mask_value`.

    # Arguments
        loss: str; name of the keras loss function from `keras.losses`
        mask_value: int; which values should be masked

    # Returns
        function; masked version of the `loss`

    # Example
        ```python
        categorical_crossentropy_masked = mask_loss("categorical_crossentropy")
        ```
    """
    loss_fn = kloss.deserialize(loss)

    def masked_loss_fn(y_true, y_pred):
        # currently not supported with NaNs:
        # - there is no K.is_nan implementation in keras.backend
        # - https://github.com/fchollet/keras/issues/1628
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())

        # we divide by the mean to correct for the number of performed loss evaluations
        return loss_fn(y_true * mask, y_pred * mask) / K.mean(mask)

    masked_loss_fn.__name__ = loss + "_masked"
    return masked_loss_fn
python
{ "resource": "" }
q7063
get_pwm_list
train
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001):
    """Get a list of HOCOMOCO PWM's.

    # Arguments
        pwm_id_list: List of id's from the `PWM_id` column in the `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = load_motif_db(HOCOMOCO_PWM)
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(_normalize_pwm(l[m]) + pseudocountProb, name=m) for m in pwm_id_list]
    return pwm_list
python
{ "resource": "" }
q7064
Concise._var_res_to_weights
train
def _var_res_to_weights(self, var_res):
    """
    Get the model weights.
    """
    # transform the weights into our form
    motif_base_weights_raw = var_res["motif_base_weights"][0]
    motif_base_weights = np.swapaxes(motif_base_weights_raw, 0, 2)

    # get weights
    motif_weights = var_res["motif_weights"]
    motif_bias = var_res["motif_bias"]
    final_bias = var_res["final_bias"]
    feature_weights = var_res["feature_weights"]

    # get the GAM prediction:
    spline_pred = None
    spline_weights = None
    if self._param["n_splines"] is not None:
        spline_pred = self._splines["X_spline"].dot(var_res["spline_weights"])
        if self._param["spline_exp"] is True:
            spline_pred = np.exp(spline_pred)
        else:
            spline_pred = (spline_pred + 1)
        spline_pred.reshape([-1])
        spline_weights = var_res["spline_weights"]

    weights = {"motif_base_weights": motif_base_weights,
               "motif_weights": motif_weights,
               "motif_bias": motif_bias,
               "final_bias": final_bias,
               "feature_weights": feature_weights,
               "spline_pred": spline_pred,
               "spline_weights": spline_weights
               }
    return weights
python
{ "resource": "" }
q7065
Concise._get_var_res
train
def _get_var_res(self, graph, var, other_var):
    """
    Get the weights from our graph.
    """
    with tf.Session(graph=graph) as sess:
        sess.run(other_var["init"])
        # all_vars = tf.all_variables()
        # print("All variable names")
        # print([var.name for var in all_vars])
        # print("All variable values")
        # print(sess.run(all_vars))
        var_res = self._get_var_res_sess(sess, var)

    return var_res
python
{ "resource": "" }
q7066
Concise._convert_to_var
train
def _convert_to_var(self, graph, var_res):
    """
    Create tf.Variables from a list of numpy arrays.

    var_res: dictionary of numpy arrays with the key names corresponding to var
    """
    with graph.as_default():
        var = {}
        for key, value in var_res.items():
            if value is not None:
                var[key] = tf.Variable(value, name="tf_%s" % key)
            else:
                var[key] = None
    return var
python
{ "resource": "" }
q7067
Concise.train
train
def train(self, X_feat, X_seq, y,
          X_feat_valid=None, X_seq_valid=None, y_valid=None,
          n_cores=3):
    """Train the CONCISE model.

    :py:attr:`X_feat`, :py:attr:`X_seq`, :py:attr:`y` are preferably returned by
    the :py:func:`concise.prepare_data` function.

    Args:
        X_feat: Numpy (float) array of shape :code:`(N, D)`. Feature design matrix
            storing :code:`N` training samples and :code:`D` features.
        X_seq: Numpy (float) array of shape :code:`(N, 1, N_seq, 4)`. It represents the
            1-hot encoding of the DNA/RNA sequence (:code:`N` sequences of length :code:`N_seq`).
        y: Numpy (float) array of shape :code:`(N, 1)`. Response variable.
        X_feat_valid: :py:attr:`X_feat` used for model validation.
        X_seq_valid: :py:attr:`X_seq` used for model validation.
        y_valid: :py:attr:`y` used for model validation.
        n_cores (int): Number of CPU cores used for training. If available, GPU is used
            for training and this argument is ignored.
    """
    if X_feat_valid is None and X_seq_valid is None and y_valid is None:
        X_feat_valid = X_feat
        X_seq_valid = X_seq
        y_valid = y
        print("Using training samples also for validation")

    # insert one dimension - backward compatibility
    X_seq = np.expand_dims(X_seq, axis=1)
    X_seq_valid = np.expand_dims(X_seq_valid, axis=1)

    # TODO: implement the re-training feature
    if self.is_trained() is True:
        print("Model already fitted. Re-training feature not implemented yet")
        return

    # input check
    assert X_seq.shape[0] == X_feat.shape[0] == y.shape[0]
    assert y.shape == (X_feat.shape[0], self._num_tasks)

    # extract data-specific parameters
    self._param["seq_length"] = X_seq.shape[2]
    self._param["n_add_features"] = X_feat.shape[1]

    # more input check
    if not self._param["seq_length"] == X_seq_valid.shape[2]:
        raise Exception("sequence lengths don't match")

    # setup splines
    if self._param["n_splines"] is not None:
        padd_loss = self._param["motif_length"] - 1  # how much shorter is our sequence, since we don't use padding
        X_spline, S, _ = splines.get_gam_splines(start=0,
                                                 end=self._param["seq_length"] - padd_loss - 1,  # -1 due to zero-indexing
                                                 n_bases=self._param["n_splines"],
                                                 spline_order=3,
                                                 add_intercept=False)
        self._splines = {"X_spline": X_spline,
                         "S": S
                         }

    # setup graph and variables
    self._graph = tf.Graph()
    self._var = self._get_var_initialization(self._graph, X_feat_train=X_feat, y_train=y)
    self._other_var = self._build_graph(self._graph, self._var)

    # TODO: save the initialized parameters
    var_res_init = self._get_var_res(self._graph, self._var, self._other_var)
    self.init_weights = self._var_res_to_weights(var_res=var_res_init)

    # finally train the model
    # - it saves the accuracy
    if self._param["optimizer"] == "adam":
        _train = self._train_adam
    elif self._param["optimizer"] == "lbfgs":
        _train = self._train_lbfgs
    else:
        raise Exception("Optimizer {} not implemented".format(self._param["optimizer"]))

    self._var_res = _train(X_feat, X_seq, y,
                           X_feat_valid, X_seq_valid, y_valid,
                           graph=self._graph,
                           var=self._var,
                           other_var=self._other_var,
                           early_stop_patience=self._param["early_stop_patience"],
                           n_cores=n_cores)

    self._model_fitted = True

    # TODO: maybe:
    # - add y_train_accuracy
    # - y_train

    return True
python
{ "resource": "" }
q7068
Concise._accuracy_in_session
train
def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y):
    """
    Compute the accuracy from inside the tf session.
    """
    y_pred = self._predict_in_session(sess, other_var, X_feat, X_seq)
    return ce.mse(y_pred, y)
python
{ "resource": "" }
q7069
Concise._set_var_res
train
def _set_var_res(self, weights):
    """
    Transform the weights to var_res.
    """
    if weights is None:
        return

    # layer 1
    motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0)
    motif_base_weights = motif_base_weights_raw[np.newaxis]

    motif_bias = weights["motif_bias"]
    feature_weights = weights["feature_weights"]
    spline_weights = weights["spline_weights"]

    # filter
    motif_weights = weights["motif_weights"]
    final_bias = weights["final_bias"]

    var_res = {
        "motif_base_weights": motif_base_weights,
        "motif_bias": motif_bias,
        "spline_weights": spline_weights,
        "feature_weights": feature_weights,
        "motif_weights": motif_weights,
        "final_bias": final_bias
    }

    # cast everything to float32
    var_res = {key: value.astype(np.float32) if value is not None else None
               for key, value in var_res.items()}

    self._var_res = var_res
python
{ "resource": "" }
q7070
ConciseCV._get_folds
train
def _get_folds(n_rows, n_folds, use_stored):
    """
    Get the used CV folds.
    """
    # n_folds = self._n_folds
    # use_stored = self._use_stored_folds
    # n_rows = self._n_rows
    if use_stored is not None:
        # path = '~/concise/data-offline/lw-pombe/cv_folds_5.json'
        with open(os.path.expanduser(use_stored)) as json_file:
            json_data = json.load(json_file)

        # check if we have the same number of rows and folds:
        if json_data['N_rows'] != n_rows:
            raise Exception("N_rows from folds doesn't match the number of rows of X_seq, X_feat, y")
        if json_data['N_folds'] != n_folds:
            raise Exception("n_folds don't match", json_data['N_folds'], n_folds)

        kf = [(np.array(train), np.array(test)) for (train, test) in json_data['folds']]
    else:
        kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1)))

    # store in a list
    i = 1
    folds = []
    for train, test in kf:
        fold = "fold_" + str(i)
        folds.append((fold, train, test))
        i = i + 1
    return folds
python
{ "resource": "" }
q7071
ConciseCV.train
train
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10,
          use_stored_folds=None, n_cores=1, train_global_model=False):
    """Train the Concise model in cross-validation.

    Args:
        X_feat: See :py:func:`concise.Concise.train`
        X_seq: See :py:func:`concise.Concise.train`
        y: See :py:func:`concise.Concise.train`
        id_vec: List of character IDs used to differentiate the training samples.
            Returned by :py:func:`concise.prepare_data`.
        n_folds (int): Number of CV folds to use.
        use_stored_folds (chr or None): File path to a .json file containing the fold
            information (as returned by :py:func:`concise.ConciseCV.get_folds`).
            If None, the folds are generated.
        n_cores (int): Number of CPU cores used for training. If available, GPU is used
            for training and this argument is ignored.
        train_global_model (bool): In addition to training the model in cross-validation,
            should the global model be fitted (using all the samples from
            :code:`(X_feat, X_seq, y)`)?
    """
    # TODO: input check - dimensions
    self._use_stored_folds = use_stored_folds
    self._n_folds = n_folds
    self._n_rows = X_feat.shape[0]

    # TODO: fix the get_cv_accuracy
    # save:
    # - each model
    # - each model's performance
    # - each model's predictions
    # - globally:
    #   - mean performance
    #   - sd performance
    #   - predictions
    self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
    cv_obj = {}

    if id_vec is None:
        id_vec = np.arange(1, self._n_rows + 1)

    best_val_acc_epoch_l = []
    for fold, train, test in self._kf:
        X_feat_train = X_feat[train]
        X_seq_train = X_seq[train]
        y_train = y[train]

        X_feat_test = X_feat[test]
        X_seq_test = X_seq[test]
        y_test = y[test]
        id_vec_test = id_vec[test]

        print(fold, "/", n_folds)

        # copy the object
        dc = copy.deepcopy(self._concise_model)
        dc.train(X_feat_train, X_seq_train, y_train,
                 X_feat_test, X_seq_test, y_test,
                 n_cores=n_cores
                 )
        dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
        cv_obj[fold] = dc
        best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"])

    self._cv_model = cv_obj

    # additionally train the global model
    if train_global_model:
        dc = copy.deepcopy(self._concise_model)

        # overwrite n_epochs with the best average number of best epochs
        dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean())
        print("training global model with n_epochs = " + str(dc._param["n_epochs"]))

        dc.train(X_feat, X_seq, y,
                 n_cores=n_cores
                 )
        dc._test(X_feat, X_seq, y, id_vec)
        self._concise_global_model = dc
python
{ "resource": "" }
q7072
ConciseCV._from_dict
train
def _from_dict(self, obj_dict):
    """
    Initialize a model from the dictionary.
    """
    self._n_folds = obj_dict["param"]["n_folds"]
    self._n_rows = obj_dict["param"]["n_rows"]
    self._use_stored_folds = obj_dict["param"]["use_stored_folds"]

    self._concise_model = Concise.from_dict(obj_dict["init_model"])
    if obj_dict["trained_global_model"] is None:
        self._concise_global_model = None
    else:
        self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"])

    self._kf = [(fold, np.asarray(train), np.asarray(test))
                for fold, train, test in obj_dict["folds"]]
    self._cv_model = {fold: Concise.from_dict(model_dict)
                      for fold, model_dict in obj_dict["output"].items()}
python
{ "resource": "" }
q7073
pwm_array2pssm_array
train
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert a PWM array to a PSSM array.
    """
    b = background_probs2array(background_probs)
    b = b.reshape([1, 4, 1])
    return np.log(arr / b).astype(arr.dtype)
python
{ "resource": "" }
q7074
pssm_array2pwm_array
train
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert a PSSM array to a PWM array.
    """
    b = background_probs2array(background_probs)
    b = b.reshape([1, 4, 1])
    return (np.exp(arr) * b).astype(arr.dtype)
python
{ "resource": "" }
q7075
load_motif_db
train
def load_motif_db(filename, skipn_matrix=0):
    """Read a motif file in the following format:

        >motif_name
        <skip n>0.1<delim>0.2<delim>0.5<delim>0.6
        ...
        >motif_name2
        ....

    Delim can be anything supported by np.loadtxt.

    # Arguments
        filename: str, file path
        skipn_matrix: integer, number of characters to skip when reading
            the numeric matrix (for Encode = 2)

    # Returns
        Dictionary of numpy arrays
    """
    # read the lines
    if filename.endswith(".gz"):
        f = gzip.open(filename, 'rt', encoding='utf-8')
    else:
        f = open(filename, 'r')
    lines = f.readlines()
    f.close()

    motifs_dict = {}

    motif_lines = ""
    motif_name = None

    def lines2matrix(lines):
        return np.loadtxt(StringIO(lines))

    for line in lines:
        if line.startswith(">"):
            if motif_lines:
                # lines -> matrix
                motifs_dict[motif_name] = lines2matrix(motif_lines)
            motif_name = line[1:].strip()
            motif_lines = ""
        else:
            motif_lines += line[skipn_matrix:]

    if motif_lines and motif_name is not None:
        motifs_dict[motif_name] = lines2matrix(motif_lines)

    return motifs_dict
python
{ "resource": "" }
q7076
iter_fasta
train
def iter_fasta(file_path):
    """Return an iterator over the fasta file.

    Given a fasta file, yield tuples of (header, sequence).

    Code modified from Brent Pedersen's
    "Correct Way To Parse A Fasta File In Python".

    # Example
        ```python
        fasta = iter_fasta("hg19.fa")
        for header, seq in fasta:
            print(header)
        ```
    """
    fh = open(file_path)

    # ditch the boolean (x[0]) and just keep the header or sequence since
    # we know they alternate.
    faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))

    for header in faiter:
        # drop the ">"
        headerStr = header.__next__()[1:].strip()

        # join all sequence lines to one.
        seq = "".join(s.strip() for s in faiter.__next__())

        yield (headerStr, seq)
python
{ "resource": "" }
q7077
write_fasta
train
def write_fasta(file_path, seq_list, name_list=None): """Write a fasta file # Arguments file_path: file path seq_list: List of strings name_list: List of names corresponding to the sequences. If not None, it should have the same length as `seq_list` """ if name_list is None: name_list = [str(i) for i in range(len(seq_list))] # name_list needs to support integer indexing (list, or dict keyed by int) with open(file_path, "w") as f: for i in range(len(seq_list)): f.write(">" + name_list[i] + "\n" + seq_list[i] + "\n")
python
{ "resource": "" }
q7078
read_RNAplfold
train
def read_RNAplfold(tmpdir, maxlen=None, seq_align="start", pad_with="E"): """Read RNAplfold output profiles from tmpdir. pad_with: which secondary-structure class to pad the sequence with ("P", "H", "I", "M" or "E") """ assert pad_with in {"P", "H", "I", "M", "E"} def read_profile(tmpdir, P): return [values.strip().split("\t") for seq_name, values in iter_fasta("{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P))] def nelem(P, pad_with): """get the right neutral element: the pad_with profile is padded with 1, all others with 0 """ return 1 if P == pad_with else 0 arr_hime = np.array([pad_sequences(read_profile(tmpdir, P), value=[nelem(P, pad_with)], align=seq_align, maxlen=maxlen) for P in RNAplfold_PROFILES_EXECUTE], dtype="float32") # add the pairedness column arr_p = 1 - arr_hime.sum(axis=0)[np.newaxis] arr = np.concatenate((arr_p, arr_hime)) # reshape to: seq, seq_length, num_channels arr = np.moveaxis(arr, 0, 2) return arr
python
{ "resource": "" }
q7079
ism
train
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs, output_filter_mask=None, out_annotation=None, diff_type="log_odds", rc_handling="maximum"): """In-silico mutagenesis. Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two ways to calculate the difference between the outputs created by reference and alternative sequence and two different methods to select whether to use the output generated from the forward or from the reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA, the same ISM prediction has to be performed on a randomised set of 1 million 1000genomes, MAF-matched variants to get a background of predicted effects of random SNPs. # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument mutation_positions: Position on which the mutation was placed in the forward sequences out_annotation_all_outputs: Output labels of the model. output_filter_mask: Mask of boolean values indicating which model outputs should be used. Use this or 'out_annotation' out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the predictions should be calculated. diff_type: "log_odds" or "diff". When set to 'log_odds' calculate scores based on log_odds, which assumes the model output is a probability. When set to 'diff' the model output for 'ref' is subtracted from 'alt'. Using 'log_odds' with outputs that are not in the range [0,1] will return nan. rc_handling: "average" or "maximum". Either average the predictions derived from the forward and reverse-complement sequences ('average') or pick the prediction with the bigger absolute value ('maximum'). # Returns Dictionary with the key `ism` which contains a pandas DataFrame containing the calculated values for each (selected) model output and input sequence """ seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc} assert diff_type in ["log_odds", "diff"] assert rc_handling in ["average", "maximum"] assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"]) assert get_seq_len(ref)[0] == mutation_positions.shape[0] assert len(mutation_positions.shape) == 1 # determine which outputs should be selected if output_filter_mask is None: if out_annotation is None: output_filter_mask = np.arange(out_annotation_all_outputs.shape[0]) else: output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0] # make sure the labels are assigned correctly out_annotation = out_annotation_all_outputs[output_filter_mask] preds = {} for k in seqs: # preds[k] = model.predict(seqs[k]) preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask]) if diff_type == "log_odds": if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]): warnings.warn("Using log_odds on model outputs that are not bounded in [0,1]") diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"])) diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"])) elif diff_type == "diff": diffs = preds["alt"] - preds["ref"] diffs_rc = preds["alt_rc"] - preds["ref_rc"] if rc_handling == "average": diffs = np.mean([diffs, diffs_rc], axis=0) elif rc_handling == "maximum": replace_filt = np.abs(diffs) < np.abs(diffs_rc) diffs[replace_filt] = diffs_rc[replace_filt] diffs = pd.DataFrame(diffs, columns=out_annotation) return {"ism": diffs}
python
{ "resource": "" }
q7080
_train_and_eval_single
train
def _train_and_eval_single(train, valid, model, batch_size=32, epochs=300, use_weight=False, callbacks=[], eval_best=False, add_eval_metrics={}): """Fit and evaluate a keras model eval_best: if True, load the checkpointed model for evaluation """ def _format_keras_history(history): """nicely format keras history """ return {"params": history.params, "loss": merge_dicts({"epoch": history.epoch}, history.history), } if use_weight: sample_weight = train[2] else: sample_weight = None # train the model logger.info("Fit...") history = History() model.fit(train[0], train[1], batch_size=batch_size, validation_data=valid[:2], epochs=epochs, sample_weight=sample_weight, verbose=2, callbacks=[history] + callbacks) # get history hist = _format_keras_history(history) # load and eval the best model if eval_best: mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)] assert len(mcp) == 1 model = load_model(mcp[0].filepath) return eval_model(model, valid, add_eval_metrics), hist
python
{ "resource": "" }
q7081
eval_model
train
def eval_model(model, test, add_eval_metrics={}): """Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `concise.eval_metrics` module. # Returns dictionary with evaluation metrics """ # evaluate the model logger.info("Evaluate...") # - model_metrics model_metrics_values = model.evaluate(test[0], test[1], verbose=0, batch_size=test[1].shape[0]) # evaluation is done in a single pass to get more precise metrics model_metrics = dict(zip(_listify(model.metrics_names), _listify(model_metrics_values))) # - eval_metrics y_true = test[1] y_pred = model.predict(test[0], verbose=0) eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()} # handle the case where the two metrics names intersect # - omit duplicates from eval_metrics intersected_keys = set(model_metrics).intersection(set(eval_metrics)) if len(intersected_keys) > 0: logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones". format(intersected_keys)) # _delete_keys asserts a list, so convert the set first eval_metrics = _delete_keys(eval_metrics, list(intersected_keys)) return merge_dicts(model_metrics, eval_metrics)
python
{ "resource": "" }
q7082
get_model
train
def get_model(model_fn, train_data, param): """Feed model_fn with train_data and param """ model_param = merge_dicts({"train_data": train_data}, param["model"], param.get("shared", {})) return model_fn(**model_param)
python
{ "resource": "" }
q7083
_delete_keys
train
def _delete_keys(dct, keys): """Returns a copy of dct without `keys` keys """ c = deepcopy(dct) assert isinstance(keys, list) for k in keys: c.pop(k) return c
python
{ "resource": "" }
q7084
_mean_dict
train
def _mean_dict(dict_list): """Compute the mean value across a list of dictionaries """ return {k: np.array([d[k] for d in dict_list]).mean() for k in dict_list[0].keys()}
python
{ "resource": "" }
q7085
CMongoTrials.get_trial
train
def get_trial(self, tid): """Retrieve trial by tid """ lid = np.where(np.array(self.tids) == tid)[0][0] return self.trials[lid]
python
{ "resource": "" }
q7086
CMongoTrials.delete_running
train
def delete_running(self, timeout_last_refresh=0, dry_run=False): """Delete jobs stalled in the running state for too long timeout_last_refresh (int): number of seconds since the last refresh after which a running job is considered stalled """ running_all = self.handle.jobs_running() running_timeout = [job for job in running_all if coarse_utcnow() > job["refresh_time"] + timedelta(seconds=timeout_last_refresh)] if len(running_timeout) == 0: # Nothing to stop self.refresh_tids(None) return None if dry_run: logger.warning("Dry run. Not removing anything.") logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ". format(len(running_timeout), len(running_all), len(self))) now = coarse_utcnow() logger.info("Current utc time: {0}".format(now)) logger.info("Time horizon: {0}".format(now - timedelta(seconds=timeout_last_refresh))) for job in running_timeout: logger.info("Removing job: ") pjob = job.to_dict() del pjob["misc"] # ignore misc when printing logger.info(pprint.pformat(pjob)) if not dry_run: self.handle.delete(job) logger.info("Job deleted") self.refresh_tids(None)
python
{ "resource": "" }
q7087
CMongoTrials.train_history
train
def train_history(self, tid=None): """Get train history as pd.DataFrame """ def result2history(result): if isinstance(result["history"], list): return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i) for i, hist in enumerate(result["history"])]) else: return pd.DataFrame(result["history"]["loss"]) # use all if tid is None: tid = self.valid_tid() res = [result2history(t["result"]).assign(tid=t["tid"]) for t in self.trials if t["tid"] in _listify(tid)] df = pd.concat(res) # reorder columns fold_name = ["fold"] if "fold" in df else [] df = _put_first(df, ["tid"] + fold_name + ["epoch"]) return df
python
{ "resource": "" }
q7088
CMongoTrials.as_df
train
def as_df(self, ignore_vals=["history"], separator=".", verbose=True): """Return a pd.DataFrame view of the whole experiment """ def add_eval(res): if "eval" not in res: if isinstance(res["history"], list): # take the average across all folds eval_names = list(res["history"][0]["loss"].keys()) eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()] for hist in res["history"]]).mean(axis=0).tolist() res["eval"] = {eval_names[i]: eval_metrics[i] for i in range(len(eval_metrics))} else: res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()} return res def add_n_epoch(df): df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index() df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True) return pd.merge(df, df_epoch, on="tid", how="left") results = self.get_ok_results(verbose=verbose) rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results] df = pd.DataFrame.from_records(rp) df = add_n_epoch(df) first = ["tid", "loss", "status"] return _put_first(df, first)
python
{ "resource": "" }
q7089
effect_from_model
train
def effect_from_model(model, ref, ref_rc, alt, alt_rc, methods, mutation_positions, out_annotation_all_outputs, extra_args=None, **argv): """Convenience function to execute multiple effect predictions in one call # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the same function more than once (even with different parameters) will overwrite the results of the previous calculation of that function. mutation_positions: Position on which the mutation was placed in the forward sequences out_annotation_all_outputs: Output labels of the model. extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries with additional arguments that should be passed on to the respective functions in 'methods'. Arguments defined here will overwrite arguments that are passed to all methods. **argv: Additional arguments to be passed on to all methods, e.g.: out_annotation. # Returns Dictionary containing the results of the individual calculations, the keys are the names of the executed functions """ assert isinstance(methods, list) if isinstance(extra_args, list): assert(len(extra_args) == len(methods)) else: extra_args = [None] * len(methods) main_args = {"model": model, "ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc, "mutation_positions": mutation_positions, "out_annotation_all_outputs": out_annotation_all_outputs} pred_results = {} for method, xargs in zip(methods, extra_args): if xargs is not None: if isinstance(xargs, dict): for k in argv: if k not in xargs: xargs[k] = argv[k] else: xargs = argv for k in main_args: xargs[k] = main_args[k] res = method(**xargs) pred_results[method.__name__] = res return pred_results
python
{ "resource": "" }
q7090
trades
train
def trades(ctx, market, limit, start, stop): """ List trades in a market """ market = Market(market, bitshares_instance=ctx.bitshares) t = [["time", "quote", "base", "price"]] for trade in market.trades(limit, start=start, stop=stop): t.append( [ str(trade["time"]), str(trade["quote"]), str(trade["base"]), "{:f} {}/{}".format( trade["price"], trade["base"]["asset"]["symbol"], trade["quote"]["asset"]["symbol"], ), ] ) print_table(t)
python
{ "resource": "" }
q7091
ticker
train
def ticker(ctx, market): """ Show ticker of a market """ market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
python
{ "resource": "" }
q7092
cancel
train
def cancel(ctx, orders, account): """ Cancel one or multiple orders """ print_tx(ctx.bitshares.cancel(orders, account=account))
python
{ "resource": "" }
q7093
orderbook
train
def orderbook(ctx, market): """ Show the orderbook of a particular market """ market = Market(market, bitshares_instance=ctx.bitshares) orderbook = market.orderbook() ta = {} ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["bids"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["bids"].append( [ str(order["quote"]), str(cumsumquote), str(order["base"]), str(cumsumbase), "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), ] ) ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["asks"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["asks"].append( [ "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), str(order["base"]), str(cumsumbase), str(order["quote"]), str(cumsumquote), ] ) t = [["bids", "asks"]] t.append([format_table(ta["bids"]), format_table(ta["asks"])]) print_table(t)
python
{ "resource": "" }
q7094
buy
train
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account): """ Buy a specific asset at a certain rate against a base asset """ amount = Amount(buy_amount, buy_asset) price = Price( price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares ) print_tx( price.market.buy(price, amount, account=account, expiration=order_expiration) )
python
{ "resource": "" }
q7095
openorders
train
def openorders(ctx, account): """ List open orders of an account """ account = Account( account or config["default_account"], bitshares_instance=ctx.bitshares ) t = [["Price", "Quote", "Base", "ID"]] for o in account.openorders: t.append( [ "{:f} {}/{}".format( o["price"], o["base"]["asset"]["symbol"], o["quote"]["asset"]["symbol"], ), str(o["quote"]), str(o["base"]), o["id"], ] ) print_table(t)
python
{ "resource": "" }
q7096
cancelall
train
def cancelall(ctx, market, account): """ Cancel all orders of an account in a market """ market = Market(market) ctx.bitshares.bundle = True market.cancel([x["id"] for x in market.accountopenorders(account)], account=account) print_tx(ctx.bitshares.txbuffer.broadcast())
python
{ "resource": "" }
q7097
spread
train
def spread(ctx, market, side, min, max, num, total, order_expiration, account): """ Place multiple orders \b :param str market: Market pair quote:base (e.g. USD:BTS) :param str side: ``buy`` or ``sell`` quote :param float min: minimum price to place order at :param float max: maximum price to place order at :param int num: Number of orders to place :param float total: Total amount of quote to use for all orders :param int order_expiration: Number of seconds until the order expires from the books """ from tqdm import tqdm from numpy import linspace market = Market(market) ctx.bitshares.bundle = True if min < max: space = linspace(min, max, num) else: space = linspace(max, min, num) func = getattr(market, side) for p in tqdm(space): func(p, total / float(num), account=account, expiration=order_expiration) print_tx(ctx.bitshares.txbuffer.broadcast())
python
{ "resource": "" }
q7098
updateratio
train
def updateratio(ctx, symbol, ratio, account): """ Update the collateral ratio of a call position """ from bitshares.dex import Dex dex = Dex(bitshares_instance=ctx.bitshares) print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
python
{ "resource": "" }
q7099
bidcollateral
train
def bidcollateral( ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account ): """ Bid for collateral in the settlement fund """ print_tx( ctx.bitshares.bid_collateral( Amount(collateral_amount, collateral_symbol), Amount(debt_amount, debt_symbol), account=account, ) )
python
{ "resource": "" }