code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
def InputRNAStructure(seq_length, name=None, **kwargs)
Input placeholder for array returned by `encodeRNAStructure`. Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
14.213018
12.730412
1.116462
return Input((seq_length, n_bases), name=name, **kwargs)
def InputSplines(seq_length, n_bases=10, name=None, **kwargs)
Input placeholder for array returned by `encodeSplines`. Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
3.783038
3.038352
1.245095
return Input((seq_length, n_bases), name=name, **kwargs)
def InputSplines1D(seq_length, n_bases=10, name=None, **kwargs)
Input placeholder for array returned by `encodeSplines`. Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
3.813198
3.025323
1.260427
return Input((seq_length, n_features), name=name, **kwargs)
def InputDNAQuantity(seq_length, n_features=1, name=None, **kwargs)
Convenience wrapper around `keras.layers.Input`: `Input((seq_length, n_features), name=name, **kwargs)`
3.556606
3.114556
1.14193
return Input((seq_length, n_bases), name=name, **kwargs)
def InputDNAQuantitySplines(seq_length, n_bases=10, name="DNASmoothPosition", **kwargs)
Convenience wrapper around `keras.layers.Input`: `Input((seq_length, n_bases), name=name, **kwargs)`
4.235785
3.13787
1.349892
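These five wrappers only fix the `shape` argument of `keras.layers.Input`. A minimal usage sketch, assuming the wrappers are importable from `concise.layers` (the toy model itself is illustrative):

```python
from keras.layers import Flatten, Dense
from keras.models import Model
from concise.layers import InputSplines  # assumed module path

inp = InputSplines(seq_length=100, n_bases=10)  # placeholder of shape (100, 10)
out = Dense(1)(Flatten()(inp))
model = Model(inp, out)
model.compile(optimizer="adam", loss="mse")
```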
W = self.get_weights()[0]
if index is None:
    index = np.arange(W.shape[2])
fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ",
              vocab=self.VOCAB, figsize=figsize, **kwargs)
# plt.show()
return fig
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs)
Plot weights as a heatmap. index: can be a particular index or a list of indices. **kwargs: additional arguments to concise.utils.plot.heatmap
4.863639
5.238806
0.928387
w_all = self.get_weights()
if len(w_all) == 0:
    raise Exception("Layer needs to be initialized first")
W = w_all[0]
if index is None:
    index = np.arange(W.shape[2])
if isinstance(index, int):
    index = [index]
fig = plt.figure(figsize=figsize)
if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS:
    arr = pssm_array2pwm_array(W, background_probs)
elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS:
    arr = W
elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS:
    quasi_pwm = pssm_array2pwm_array(W, background_probs)
    arr = _pwm2pwm_info(quasi_pwm)
else:
    raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol,
                  plot_name="filter: ")
# fig.show()
return fig
def _plot_weights_motif(self, index, plot_type="motif_raw", background_probs=DEFAULT_BASE_BACKGROUND, ncol=1, figsize=None)
Index can only be a single int
3.330112
3.337621
0.99775
if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap": return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs) elif plot_type[:5] == "motif": return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs) else: raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs)
Plot filters as heatmap or motifs. index: can be a particular index or a list of indices. **kwargs: additional arguments to concise.utils.plot.heatmap
2.138205
2.092632
1.021778
for pwm in pwm_list: if not isinstance(pwm, PWM): raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm)) return True
def _check_pwm_list(pwm_list)
Check the input validity
2.885217
2.821064
1.022741
'''Add noise with truncnorm from scipy.stats. Bounded by (alpha, 1 - alpha).'''
# provide entry to choose which noise-adding method to use
if seed is not None:
    np.random.seed(seed)
if stddev == 0:
    X = mean
else:
    gen_X = truncnorm((alpha - mean) / stddev,
                      ((1 - alpha) - mean) / stddev,
                      loc=mean, scale=stddev)
    X = gen_X.rvs()
if normalize:
    # normalize: rows sum to 1
    col_sums = X.sum(1)
    X = X / col_sums[:, np.newaxis]
return X
def _truncated_normal(mean, stddev, seed=None, normalize=True, alpha=0.01)
Add noise with truncnorm from scipy.stats. Bounded by (alpha, 1 - alpha).
5.60362
4.36755
1.283012
# Generate y and x values from the dimension lengths
assert len(vocab) == w.shape[0]
plt_y = np.arange(w.shape[0] + 1) + 0.5
plt_x = np.arange(w.shape[1] + 1) - 0.5
z_min = w.min()
z_max = w.max()
if vmin is None:
    vmin = z_min
if vmax is None:
    vmax = z_max
if diverge_color:
    color_map = plt.cm.RdBu
else:
    color_map = plt.cm.Blues
fig = plt.figure(figsize=figsize)
# multiple axis
if len(w.shape) == 3:
    n_plots = w.shape[2]
    nrow = math.ceil(n_plots / ncol)
else:
    n_plots = 1
    nrow = 1
    ncol = 1
for i in range(n_plots):
    if len(w.shape) == 3:
        w_cur = w[:, :, i]
    else:
        w_cur = w
    ax = plt.subplot(nrow, ncol, i + 1)
    plt.tight_layout()
    im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map,
                       vmin=vmin, vmax=vmax, edgecolors="white")
    ax.grid(False)
    ax.set_yticklabels([""] + vocab, minor=False)
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.set_xticks(np.arange(w_cur.shape[1] + 1))
    ax.set_xlim(plt_x.min(), plt_x.max())
    ax.set_ylim(plt_y.min(), plt_y.max())
    # nice scale location:
    # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(im, cax=cax)
    if plot_name is not None:
        if n_plots > 0:
            pln = plot_name + " {0}".format(i)
        else:
            pln = plot_name
        ax.set_title(pln)
    ax.set_aspect('equal')
return fig
def heatmap(w, vmin=None, vmax=None, diverge_color=False, ncol=1, plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2))
Plot a heatmap from weight matrix w. vmin, vmax = z-axis range. diverge_color = should we use diverging colors? plot_name = plot title. vocab = vocabulary (corresponds to the first axis)
2.080767
2.07508
1.002741
# find all of the polygons in the letter (for instance an A
# needs to be constructed from 2 polygons)
path_strs = re.findall(r"\(\(([^\)]+?)\)\)", data_str.strip())
# convert the data into a numpy array
polygons_data = []
for path_str in path_strs:
    data = np.array([tuple(map(float, x.split()))
                     for x in path_str.strip().split(",")])
    polygons_data.append(data)
# standardize the coordinates
min_coords = np.vstack([data.min(0) for data in polygons_data]).min(0)
max_coords = np.vstack([data.max(0) for data in polygons_data]).max(0)
for data in polygons_data:
    data[:, ] -= min_coords
    data[:, ] /= (max_coords - min_coords)
polygons = []
for data in polygons_data:
    polygons.append(load_wkt(
        "POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))
return tuple(polygons)
def standardize_polygons_str(data_str)
Given a POLYGON string, standardize the coordinates to a 1x1 grid. Input : data_str (taken from above) Output: tuple of polygon objects
2.873241
2.856057
1.006017
if len(let) == 2: colors = [col, "white"] elif len(let) == 1: colors = [col] else: raise ValueError("3 or more Polygons are not supported") for polygon, color in zip(let, colors): new_polygon = affinity.scale( polygon, yfact=height, origin=(0, 0, 0)) new_polygon = affinity.translate( new_polygon, xoff=x, yoff=y) patch = PolygonPatch( new_polygon, edgecolor=color, facecolor=color) ax.add_patch(patch) return
def add_letter_to_axis(ax, let, col, x, y, height)
Add 'let' with position x,y and height height to matplotlib axis 'ax'.
2.570369
2.524042
1.018355
ax = ax or plt.gca()
assert letter_heights.shape[1] == len(VOCABS[vocab])
x_range = [1, letter_heights.shape[0]]
pos_heights = np.copy(letter_heights)
pos_heights[letter_heights < 0] = 0
neg_heights = np.copy(letter_heights)
neg_heights[letter_heights > 0] = 0
for x_pos, heights in enumerate(letter_heights):
    letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys())))
    y_pos_pos = 0.0
    y_neg_pos = 0.0
    for height, letter in letters_and_heights:
        color = VOCABS[vocab][letter]
        polygons = letter_polygons[letter]
        if height > 0:
            add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height)
            y_pos_pos += height
        else:
            add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height)
            y_neg_pos += height
# if add_hline:
#     ax.axhline(color="black", linewidth=1)
ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
ax.grid(False)
ax.set_xticks(list(range(*x_range)) + [x_range[-1]])
ax.set_aspect(aspect='auto', adjustable='box')
ax.autoscale_view()
def seqlogo(letter_heights, vocab="DNA", ax=None)
Make a logo plot # Arguments letter_heights: "motif length" x "vocabulary size" numpy array Can also contain negative values. vocab: str, Vocabulary name. Can be: DNA, RNA, AA, RNAStruct. ax: matplotlib axis
2.299727
2.339683
0.982922
ac_list = [(accuracy["train_acc_final"], accuracy["test_acc_final"] ) for accuracy, weights in res] ac = np.array(ac_list) perf = { "mean_train_acc": np.mean(ac[:, 0]), "std_train_acc": np.std(ac[:, 0]), "mean_test_acc": np.mean(ac[:, 1]), "std_test_acc": np.std(ac[:, 1]), } return perf
def get_cv_accuracy(res)
Extract the cv accuracy from the model
2.78096
2.734432
1.017016
tokens = one_hot2token(arr) indexToLetter = _get_index_dict(vocab) return [''.join([indexToLetter[x] for x in row]) for row in tokens]
def one_hot2string(arr, vocab)
Convert a one-hot encoded array back to string
6.021876
5.956554
1.010966
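A round-trip sketch for `one_hot2string`, using the `encodeDNA` helper that appears further down in this table:

```python
arr = encodeDNA(["ACGT", "TTAA"])                # shape (2, 4, 4), one-hot
one_hot2string(arr, vocab=["A", "C", "G", "T"])  # -> ['ACGT', 'TTAA']
```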
# Req: all vocabs have the same length
if isinstance(neutral_vocab, str):
    neutral_vocab = [neutral_vocab]
nchar = len(vocab[0])
for l in vocab + neutral_vocab:
    assert len(l) == nchar
assert len(seq) % nchar == 0  # since we are using striding
vocab_dict = _get_vocab_dict(vocab)
for l in neutral_vocab:
    vocab_dict[l] = -1
# current performance bottleneck
return [vocab_dict[seq[(i * nchar):((i + 1) * nchar)]]
        for i in range(len(seq) // nchar)]
def tokenize(seq, vocab, neutral_vocab=[])
Convert sequence to integers # Arguments seq: Sequence to encode vocab: Vocabulary to use neutral_vocab: Neutral vocabulary -> assign those values to -1 # Returns List of length `len(seq)` with integers from `-1` to `len(vocab) - 1`
4.245658
4.438102
0.956638
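A behaviour sketch of `tokenize` per its docstring (DNA vocabulary, with `N` as the neutral element):

```python
tokenize("ACGTN", vocab=["A", "C", "G", "T"], neutral_vocab=["N"])
# -> [0, 1, 2, 3, -1]   ("N" maps to -1)
```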
arr = np.zeros((len(tvec), vocab_size)) tvec_range = np.arange(len(tvec)) tvec = np.asarray(tvec) arr[tvec_range[tvec >= 0], tvec[tvec >= 0]] = 1 return arr
def token2one_hot(tvec, vocab_size)
Note: everything outside of the vocabulary is transformed into `np.zeros(vocab_size)`
2.447105
2.477497
0.987733
if isinstance(neutral_vocab, str):
    neutral_vocab = [neutral_vocab]
if isinstance(seq_vec, str):
    raise ValueError("seq_vec should be an iterable returning " +
                     "strings not a string itself")
assert len(vocab[0]) == len(pad_value)
assert pad_value in neutral_vocab
assert encode_type in ["one_hot", "token"]
seq_vec = pad_sequences(seq_vec, maxlen=maxlen,
                        align=seq_align, value=pad_value)
if encode_type == "one_hot":
    arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab))
                for i, seq in enumerate(seq_vec)]
elif encode_type == "token":
    arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab))
                for seq in seq_vec]
    # we add 1 to be compatible with keras: https://keras.io/layers/embeddings/
    # indexes > 0, 0 = padding element
return np.stack(arr_list)
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None, seq_align="start", pad_value="N", encode_type="one_hot")
Convert a list of genetic sequences into one-hot-encoded array. # Arguments seq_vec: list of strings (genetic sequences) vocab: list of chars: List of "words" to use as the vocabulary. Can be strings of length > 0, but all need to have the same length. For DNA, this is: ["A", "C", "G", "T"]. neutral_vocab: list of chars: Values used to pad the sequence or represent unknown values. For DNA, this is: ["N"]. maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trimming is done with respect to the `seq_align` parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? encode_type: "one_hot" or "token". "token" represents each vocab element as a positive integer from 1 to len(vocab). neutral_vocab is represented with 0. # Returns Array with shape for encode_type: - "one_hot": `(len(seq_vec), maxlen, len(vocab))` - "token": `(len(seq_vec), maxlen)` If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`.
3.363976
3.244384
1.036861
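The `encodeDNA` entry below already demonstrates `encode_type="one_hot"`; a sketch of the `"token"` variant, where indices are shifted by +1 so 0 can serve as the padding index:

```python
encodeSequence(["ACGT", "AC"], vocab=["A", "C", "G", "T"],
               neutral_vocab=["N"], encode_type="token")
# -> array([[1, 2, 3, 4],
#           [1, 2, 0, 0]])   # "AC" padded at the end with "N" -> 0
```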
return encodeSequence(seq_vec, vocab=DNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
def encodeDNA(seq_vec, maxlen=None, seq_align="start")
Convert the DNA sequence into 1-hot-encoding numpy array # Arguments seq_vec: list of chars List of sequences that can have different lengths maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trimming is done with respect to the `seq_align` parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? # Returns 3D numpy array of shape (len(seq_vec), trim_seq_len (or maximal sequence length if None), 4) # Example ```python >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA'] >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8) >>> X_seq.shape (2, 8, 4) >>> print(X_seq) [[[0 0 0 1] [1 0 0 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [1 0 0 0] [0 0 1 0] [1 0 0 0]] [[0 0 0 0] [0 0 0 0] [0 0 0 1] [0 1 0 0] [0 0 0 1] [0 0 0 1] [0 0 0 1] [1 0 0 0]]] ```
5.084891
6.805484
0.747176
return encodeSequence(seq_vec, vocab=RNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
def encodeRNA(seq_vec, maxlen=None, seq_align="start")
Convert the RNA sequence into 1-hot-encoding numpy array as for encodeDNA
5.387782
5.01909
1.073458
if ignore_stop_codons:
    vocab = CODONS
    neutral_vocab = STOP_CODONS + ["NNN"]
else:
    vocab = CODONS + STOP_CODONS
    neutral_vocab = ["NNN"]
# replace all U's with T's
seq_vec = [str(seq).replace("U", "T") for seq in seq_vec]
return encodeSequence(seq_vec,
                      vocab=vocab,
                      neutral_vocab=neutral_vocab,
                      maxlen=maxlen,
                      seq_align=seq_align,
                      pad_value="NNN",
                      encode_type=encode_type)
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None, seq_align="start", encode_type="one_hot")
Convert the Codon sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/DNA sequences ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding. maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `"token"` for token encoding of codons (incremental integers). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`
3.012142
2.946077
1.022425
return encodeSequence(seq_vec, vocab=AMINO_ACIDS, neutral_vocab="_", maxlen=maxlen, seq_align=seq_align, pad_value="_", encode_type=encode_type)
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot")
Convert the Amino-acid sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/amino-acid sequences maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `"token"` for token encoding of amino acids (incremental integers). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
4.773817
5.508833
0.866575
if isinstance(gtf, str):
    _logger.info("Reading gtf file..")
    gtf = read_gtf(gtf)
    _logger.info("Done")
_logger.info("Running landmark extractors..")
# landmarks to a dictionary with a function
assert isinstance(landmarks, (list, tuple, set, dict))
if isinstance(landmarks, dict):
    landmarks = {k: _get_fun(v) for k, v in landmarks.items()}
else:
    landmarks = {_to_string(fn_str): _get_fun(fn_str) for fn_str in landmarks}
r = {k: _validate_pos(v(gtf)) for k, v in landmarks.items()}
_logger.info("Done!")
return r
def extract_landmarks(gtf, landmarks=ALL_LANDMARKS)
Extract landmark positions from a gene annotation GFF/GTF file. # Arguments gtf: File path or a loaded `pd.DataFrame` with columns: seqname, feature, start, end, strand landmarks: list or a dictionary of landmark extractors (function or name) # Note When landmark extractor names are used, they have to be implemented in the module `concise.preprocessing.position` # Returns Dictionary of pd.DataFrames with landmark positions (columns: seqname, position, strand)
3.317635
3.281051
1.01115
assert isinstance(df, pd.DataFrame) assert ["seqname", "position", "strand"] == df.columns.tolist() assert df.position.dtype == np.dtype("int64") assert df.strand.dtype == np.dtype("O") assert df.seqname.dtype == np.dtype("O") return df
def _validate_pos(df)
Validates the returned positional object
2.563419
2.515666
1.018982
# assert k >= 0
with tf.name_scope(scope, 'L1Loss', [tensor]):
    loss = tf.reduce_mean(tf.select(tf.abs(tensor) < k,
                                    0.5 * tf.square(tensor),
                                    k * tf.abs(tensor) - 0.5 * k ** 2))
    return loss
def huber_loss(tensor, k=1, scope=None)
Define a Huber loss (https://en.wikipedia.org/wiki/Huber_loss). tensor: tensor to regularize. k: value of k in the Huber loss. scope: Optional scope for op_scope. Huber loss: f(x) = 0.5 * x^2 if |x| <= k else k * |x| - 0.5 * k^2. Returns: the Huber loss op.
2.66788
2.660455
1.002791
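A NumPy reference implementation of the same piecewise Huber formula, useful for sanity-checking the TF op (a sketch, independent of TensorFlow):

```python
import numpy as np

def huber_np(x, k=1.0):
    # mean of: 0.5*x^2 where |x| < k, else k*|x| - 0.5*k^2
    x = np.asarray(x, dtype=float)
    return np.mean(np.where(np.abs(x) < k,
                            0.5 * x ** 2,
                            k * np.abs(x) - 0.5 * k ** 2))

huber_np([0.5, -2.0])  # 0.5*0.25 = 0.125 and 2 - 0.5 = 1.5 -> mean 0.8125
```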
dt = pd.read_table(ATTRACT_METADTA)
# rename Matrix_id to PWM_id
dt.rename(columns={"Matrix_id": "PWM_id"}, inplace=True)
# put PWM_id to first place
cols = ['PWM_id'] + [col for col in dt if col != 'PWM_id']
return dt[cols]
def get_metadata()
Get pandas.DataFrame with metadata about the Attract PWMs. Columns: - PWM_id (id of the PWM; pass to get_pwm_list() to get the PWM) - Gene_name - Gene_id - Mutated (if the target gene is mutated) - Organism - Motif (consensus motif) - Len (length of the motif) - Experiment_description (when available) - Database (Database from where the motifs were extracted. PDB: Protein data bank, C: Cisbp-RNA, R: RBPDB, S: Spliceaid-F, AEDB: ASD) - Pubmed (pubmed ID) - Experiment (type of experiment; short description) - Family (domain) - Score (Qscore; refer to the paper)
8.624002
6.044611
1.426726
l = load_motif_db(ATTRACT_PWM) l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(l[str(m)] + pseudocountProb, name=m) for m in pwm_id_list] return pwm_list
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001)
Get a list of Attract PWM's. # Arguments pwm_id_list: List of id's from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances.
4.783738
4.979681
0.960652
loss_fn = kloss.deserialize(loss)

def masked_loss_fn(y_true, y_pred):
    # currently not supported with NA's:
    # - there is no K.is_nan implementation in keras.backend
    # - https://github.com/fchollet/keras/issues/1628
    mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
    # we divide by the mean to correct for the number of done loss evaluations
    return loss_fn(y_true * mask, y_pred * mask) / K.mean(mask)

masked_loss_fn.__name__ = loss + "_masked"
return masked_loss_fn
def mask_loss(loss, mask_value=MASK_VALUE)
Generates a new loss function that ignores values where `y_true == mask_value`. # Arguments loss: str; name of the keras loss function from `keras.losses` mask_value: int; which values should be masked # Returns function; Masked version of the `loss` # Example ```python categorical_crossentropy_masked = mask_loss("categorical_crossentropy") ```
4.68388
4.915209
0.952936
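A compile-time usage sketch; the module path `concise.losses` is an assumption and the toy model is illustrative. Entries of `y_true` equal to `MASK_VALUE` then contribute zero loss:

```python
from keras.models import Sequential
from keras.layers import Dense
from concise.losses import mask_loss  # assumed module path

model = Sequential([Dense(1, input_shape=(10,))])
model.compile(optimizer="adam", loss=mask_loss("mse"))
```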
l = load_motif_db(HOCOMOCO_PWM) l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(_normalize_pwm(l[m]) + pseudocountProb, name=m) for m in pwm_id_list] return pwm_list
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001)
Get a list of HOCOMOCO PWM's. # Arguments pwm_id_list: List of id's from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances.
4.800229
5.216824
0.920144
if self.is_trained() is False:
    # print("Model not fitted yet. Use object.fit() to fit the model.")
    return None
var_res = self._var_res
weights = self._var_res_to_weights(var_res)
# save to the side
weights["final_bias_fit"] = weights["final_bias"]
weights["feature_weights_fit"] = weights["feature_weights"]
return weights
def get_weights(self)
Returns: dict: Model's trained weights.
6.685046
6.255672
1.068638
# transform the weights into our form
motif_base_weights_raw = var_res["motif_base_weights"][0]
motif_base_weights = np.swapaxes(motif_base_weights_raw, 0, 2)
# get weights
motif_weights = var_res["motif_weights"]
motif_bias = var_res["motif_bias"]
final_bias = var_res["final_bias"]
feature_weights = var_res["feature_weights"]
# get the GAM prediction:
spline_pred = None
spline_weights = None
if self._param["n_splines"] is not None:
    spline_pred = self._splines["X_spline"].dot(var_res["spline_weights"])
    if self._param["spline_exp"] is True:
        spline_pred = np.exp(spline_pred)
    else:
        spline_pred = (spline_pred + 1)
    spline_pred = spline_pred.reshape([-1])
    spline_weights = var_res["spline_weights"]
weights = {"motif_base_weights": motif_base_weights,
           "motif_weights": motif_weights,
           "motif_bias": motif_bias,
           "final_bias": final_bias,
           "feature_weights": feature_weights,
           "spline_pred": spline_pred,
           "spline_weights": spline_weights}
return weights
def _var_res_to_weights(self, var_res)
Get model weights
2.774234
2.712232
1.02286
with tf.Session(graph=graph) as sess:
    sess.run(other_var["init"])
    # all_vars = tf.all_variables()
    # print("All variable names")
    # print([var.name for var in all_vars])
    # print("All variable values")
    # print(sess.run(all_vars))
    var_res = self._get_var_res_sess(sess, var)
return var_res
def _get_var_res(self, graph, var, other_var)
Get the weights from our graph
3.089385
2.960026
1.043702
with graph.as_default(): var = {} for key, value in var_res.items(): if value is not None: var[key] = tf.Variable(value, name="tf_%s" % key) else: var[key] = None return var
def _convert_to_var(self, graph, var_res)
Create tf.Variables from a list of numpy arrays var_res: dictionary of numpy arrays with the key names corresponding to var
2.657722
2.63548
1.008439
# other_var["tf_X_seq"]: X_seq, tf_y: y,
feed_dict = {other_var["tf_X_feat"]: X_feat,
             other_var["tf_X_seq"]: X_seq}
y_pred = sess.run(other_var[variable], feed_dict=feed_dict)
return y_pred
def _predict_in_session(self, sess, other_var, X_feat, X_seq, variable="y_pred")
Predict y (or any other variable) from inside the tf session. Variable has to be in other_var
2.718786
2.631214
1.033282
y_pred = self._predict_in_session(sess, other_var, X_feat, X_seq) return ce.mse(y_pred, y)
def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y)
Compute the accuracy from inside the tf session
3.03722
3.1614
0.96072
# insert one dimension - backward compatibility
X_seq = np.expand_dims(X_seq, axis=1)
return self._get_other_var(X_feat, X_seq, variable="y_pred")
def predict(self, X_feat, X_seq)
Predict the response variable :py:attr:`y` for new input data (:py:attr:`X_feat`, :py:attr:`X_seq`). Args: X_feat: Feature design matrix. Same format as :py:attr:`X_feat` in :py:meth:`train` X_seq: Sequence design matrix. Same format as :py:attr:`X_seq` in :py:meth:`train`
7.394643
8.858438
0.834757
if self.is_trained() is False:
    print("Model not fitted yet. Use object.fit() to fit the model.")
    return
# input check:
assert X_seq.shape[0] == X_feat.shape[0]
# TODO - check this
# sequence can be wider or thinner?
# assert self._param["seq_length"] == X_seq.shape[2]
assert self._param["n_add_features"] == X_feat.shape[1]
# setup graph and variables
graph = tf.Graph()
var = self._convert_to_var(graph, self._var_res)
other_var = self._build_graph(graph, var)
with tf.Session(graph=graph) as sess:
    sess.run(other_var["init"])
    # predict
    y = self._predict_in_session(sess, other_var, X_feat, X_seq, variable)
return y
def _get_other_var(self, X_feat, X_seq, variable="y_pred")
Get the value of a variable from other_vars (from a tf-graph)
4.725041
4.565965
1.034839
final_res = { "param": self._param, "unused_param": self.unused_param, "execution_time": self._exec_time, "output": {"accuracy": self.get_accuracy(), "weights": self.get_weights(), "splines": self._splines } } return final_res
def to_dict(self)
Returns: dict: Concise represented as a dictionary.
5.388653
5.09722
1.057175
if weights is None:
    return
# layer 1
motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0)
motif_base_weights = motif_base_weights_raw[np.newaxis]
motif_bias = weights["motif_bias"]
feature_weights = weights["feature_weights"]
spline_weights = weights["spline_weights"]
# filter
motif_weights = weights["motif_weights"]
final_bias = weights["final_bias"]
var_res = {"motif_base_weights": motif_base_weights,
           "motif_bias": motif_bias,
           "spline_weights": spline_weights,
           "feature_weights": feature_weights,
           "motif_weights": motif_weights,
           "final_bias": final_bias}
# cast everything to float32
var_res = {key: value.astype(np.float32) if value is not None else None
           for key, value in var_res.items()}
self._var_res = var_res
def _set_var_res(self, weights)
Transform the weights to var_res
2.42868
2.404389
1.010103
# convert the output into a proper form
obj_dict['output'] = helper.rec_dict_to_numpy_dict(obj_dict["output"])
helper.dict_to_numpy_dict(obj_dict['output'])
if "trained_global_model" in obj_dict.keys():
    raise Exception("Found trained_global_model feature in dictionary. Use ConciseCV.load to load this file.")
dc = Concise(**obj_dict["param"])
# touch the hidden arguments
dc._param = obj_dict["param"]
if obj_dict["output"]["weights"] is None:
    dc._model_fitted = False
else:
    dc._model_fitted = True
dc._exec_time = obj_dict["execution_time"]
dc.unused_param = obj_dict["unused_param"]
dc._accuracy = obj_dict["output"]["accuracy"]
dc._splines = obj_dict["output"]["splines"]
weights = obj_dict["output"]["weights"]
if weights is not None:
    # fix the dimensionality of X_feat in case it was 0 dimensional
    if weights["feature_weights"].shape == (0,):
        weights["feature_weights"].shape = (0, obj_dict["param"]["num_tasks"])
dc._set_var_res(weights)
return dc
def from_dict(cls, obj_dict)
Load the object from a dictionary (produced with :py:func:`Concise.to_dict`) Returns: Concise: Loaded Concise object.
5.907323
5.558915
1.062676
# convert back to numpy
data = helper.read_json(file_path)
return Concise.from_dict(data)
def load(cls, file_path)
Load the object from a JSON file (saved with :py:func:`Concise.save`). Returns: Concise: Loaded Concise object.
13.85389
8.767258
1.580185
# n_folds = self._n_folds
# use_stored = self._use_stored_folds
# n_rows = self._n_rows
if use_stored is not None:
    # path = '~/concise/data-offline/lw-pombe/cv_folds_5.json'
    with open(os.path.expanduser(use_stored)) as json_file:
        json_data = json.load(json_file)
    # check if we have the same number of rows and folds:
    if json_data['N_rows'] != n_rows:
        raise Exception("N_rows from folds doesn't match the number of rows of X_seq, X_feat, y")
    if json_data['N_folds'] != n_folds:
        raise Exception("n_folds don't match", json_data['N_folds'], n_folds)
    kf = [(np.array(train), np.array(test)) for (train, test) in json_data['folds']]
else:
    kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1)))
# store in a list
i = 1
folds = []
for train, test in kf:
    fold = "fold_" + str(i)
    folds.append((fold, train, test))
    i = i + 1
return folds
def _get_folds(n_rows, n_folds, use_stored)
Get the used CV folds
3.307963
3.243745
1.019797
# TODO: input check - dimensions
self._use_stored_folds = use_stored_folds
self._n_folds = n_folds
self._n_rows = X_feat.shape[0]
# TODO: - fix the get_cv_accuracy
# save:
# - each model
# - each model's performance
# - each model's predictions
# - globally:
#   - mean performance
#   - sd performance
#   - predictions
self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
cv_obj = {}
if id_vec is None:
    id_vec = np.arange(1, self._n_rows + 1)
best_val_acc_epoch_l = []
for fold, train, test in self._kf:
    X_feat_train = X_feat[train]
    X_seq_train = X_seq[train]
    y_train = y[train]
    X_feat_test = X_feat[test]
    X_seq_test = X_seq[test]
    y_test = y[test]
    id_vec_test = id_vec[test]
    print(fold, "/", n_folds)
    # copy the object
    dc = copy.deepcopy(self._concise_model)
    dc.train(X_feat_train, X_seq_train, y_train,
             X_feat_test, X_seq_test, y_test,
             n_cores=n_cores)
    dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
    cv_obj[fold] = dc
    best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"])
self._cv_model = cv_obj
# additionally train the global model
if train_global_model:
    dc = copy.deepcopy(self._concise_model)
    # overwrite n_epochs with the best average number of best epochs
    dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean())
    print("training global model with n_epochs = " + str(dc._param["n_epochs"]))
    dc.train(X_feat, X_seq, y, n_cores=n_cores)
    dc._test(X_feat, X_seq, y, id_vec)
    self._concise_global_model = dc
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1, train_global_model=False)
Train the Concise model in cross-validation. Args: X_feat: See :py:func:`concise.Concise.train` X_seq: See :py:func:`concise.Concise.train` y: See :py:func:`concise.Concise.train` id_vec: List of character id's used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`. n_folds (int): Number of CV-folds to use. use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored. train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
2.893247
2.84484
1.017016
# TODO: get it from the test_prediction ...
# test_id, prediction
# sort by test_id
predict_vec = np.zeros((self._n_rows, self._concise_model._num_tasks))
for fold, train, test in self._kf:
    acc = self._cv_model[fold].get_accuracy()
    predict_vec[test, :] = acc["y_test_prediction"]
return predict_vec
def get_CV_prediction(self)
Returns: np.ndarray: Predictions on the hold-out folds (unseen data, corresponds to :py:attr:`y`).
9.548344
9.029372
1.057476
accuracy = {} for fold, train, test in self._kf: acc = self._cv_model[fold].get_accuracy() accuracy[fold] = acc["test_acc_final"] return accuracy
def get_CV_accuracy(self)
Returns: float: Prediction accuracy in CV.
6.193666
6.516443
0.950467
param = { "n_folds": self._n_folds, "n_rows": self._n_rows, "use_stored_folds": self._use_stored_folds } if self._concise_global_model is None: trained_global_model = None else: trained_global_model = self._concise_global_model.to_dict() obj_dict = {"param": param, "folds": self._kf, "init_model": self._concise_model.to_dict(), "trained_global_model": trained_global_model, "output": {fold: model.to_dict() for fold, model in self.get_CV_models().items()} } return obj_dict
def to_dict(self)
Returns: dict: ConciseCV represented as a dictionary.
3.53784
3.355264
1.054415
default_model = Concise() cvdc = ConciseCV(default_model) cvdc._from_dict(obj_dict) return cvdc
def from_dict(cls, obj_dict)
Load the object from a dictionary (produced with :py:func:`ConciseCV.to_dict`) Returns: ConciseCV: Loaded ConciseCV object.
10.723162
6.263101
1.712117
self._n_folds = obj_dict["param"]["n_folds"] self._n_rows = obj_dict["param"]["n_rows"] self._use_stored_folds = obj_dict["param"]["use_stored_folds"] self._concise_model = Concise.from_dict(obj_dict["init_model"]) if obj_dict["trained_global_model"] is None: self._concise_global_model = None else: self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"]) self._kf = [(fold, np.asarray(train), np.asarray(test)) for fold, train, test in obj_dict["folds"]] self._cv_model = {fold: Concise.from_dict(model_dict) for fold, model_dict in obj_dict["output"].items()}
def _from_dict(self, obj_dict)
Initialize a model from the dictionary
2.882434
2.878734
1.001286
data = helper.read_json(file_path) return ConciseCV.from_dict(data)
def load(cls, file_path)
Load the object from a JSON file (saved with :py:func:`ConciseCV.save`) Returns: ConciseCV: Loaded ConciseCV object.
12.944995
5.987683
2.161937
b = background_probs2array(background_probs) b = b.reshape([1, 4, 1]) return np.log(arr / b).astype(arr.dtype)
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND)
Convert pwm array to pssm array
5.671796
5.497758
1.031656
b = background_probs2array(background_probs) b = b.reshape([1, 4, 1]) return (np.exp(arr) * b).astype(arr.dtype)
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND)
Convert pssm array to pwm array
5.295993
5.306325
0.998053
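The two converters above are inverses by construction (`pssm = log(pwm / b)` and `pwm = exp(pssm) * b` for background `b`); a round-trip sketch with the default background:

```python
import numpy as np

pwm = np.full((3, 4, 1), 0.25)   # (positions, 4 bases, 1 filter), uniform PWM
pssm = pwm_array2pssm_array(pwm)
assert np.allclose(pssm_array2pwm_array(pssm), pwm)
```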
# read lines
if filename.endswith(".gz"):
    f = gzip.open(filename, 'rt', encoding='utf-8')
else:
    f = open(filename, 'r')
lines = f.readlines()
f.close()
motifs_dict = {}
motif_lines = ""
motif_name = None

def lines2matrix(lines):
    return np.loadtxt(StringIO(lines))

for line in lines:
    if line.startswith(">"):
        if motif_lines:
            # lines -> matrix
            motifs_dict[motif_name] = lines2matrix(motif_lines)
        motif_name = line[1:].strip()
        motif_lines = ""
    else:
        motif_lines += line[skipn_matrix:]
if motif_lines and motif_name is not None:
    motifs_dict[motif_name] = lines2matrix(motif_lines)
return motifs_dict
def load_motif_db(filename, skipn_matrix=0)
Read the motif file in the following format ``` >motif_name <skip n>0.1<delim>0.2<delim>0.5<delim>0.6 ... >motif_name2 .... ``` Delim can be anything supported by np.loadtxt # Arguments filename: str, file path skipn_matrix: integer, number of characters to skip when reading the numeric matrix (for Encode = 2) # Returns Dictionary of numpy arrays
2.194329
2.135866
1.027372
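A usage sketch; `motifs.txt` is a hypothetical file in the format described above, not one shipped with the library:

```python
motifs = load_motif_db("motifs.txt")   # {motif_name: np.ndarray}
# ENCODE-style files prefix each matrix row with extra characters; skip them:
# motifs = load_motif_db("encode_motifs.txt", skipn_matrix=2)
```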
fh = open(file_path)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
    # drop the ">"
    headerStr = header.__next__()[1:].strip()
    # join all sequence lines to one.
    seq = "".join(s.strip() for s in faiter.__next__())
    yield (headerStr, seq)
def iter_fasta(file_path)
Returns an iterator over a fasta file, yielding (header, sequence) tuples. Code modified from Brent Pedersen's "Correct Way To Parse A Fasta File In Python". # Example ```python fasta = iter_fasta("hg19.fa") for header, seq in fasta: print(header) ```
2.159586
2.030995
1.063314
if name_list is None:
    name_list = [str(i) for i in range(len(seq_list))]
# needs to be dict or seq
with open(file_path, "w") as f:
    for i in range(len(seq_list)):
        f.write(">" + name_list[i] + "\n" + seq_list[i] + "\n")
def write_fasta(file_path, seq_list, name_list=None)
Write a fasta file # Arguments file_path: file path seq_list: List of strings name_list: List of names corresponding to the sequences. If not None, it should have the same length as `seq_list`
2.166742
2.311967
0.937185
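A round-trip sketch pairing `write_fasta` with `iter_fasta` above; the temporary path is illustrative:

```python
import os, tempfile

path = os.path.join(tempfile.mkdtemp(), "toy.fa")
write_fasta(path, ["ACGT", "TTGA"], name_list=["seq1", "seq2"])
print(list(iter_fasta(path)))
# [('seq1', 'ACGT'), ('seq2', 'TTGA')]
```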
profiles = RNAplfold_PROFILES_EXECUTE
for i, P in enumerate(profiles):
    print("running {P}_RNAplfold... ({i}/{N})".format(P=P, i=i + 1, N=len(profiles)))
    command = "{bin}/{P}_RNAplfold".format(bin=RNAplfold_BIN_DIR, P=P)
    file_out = "{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P)
    args = " -W {W} -L {L} -u {U} < {fa} > {file_out}".format(W=W, L=L, U=U, fa=input_fasta, file_out=file_out)
    os.system(command + args)
    # check if the file is empty
    if os.path.getsize(file_out) == 0:
        raise Exception("command wrote an empty file: {0}".format(file_out))
print("done!")
def run_RNAplfold(input_fasta, tmpdir, W=240, L=160, U=1)
Arguments: W, Int: span - window length. L, Int: maximum span. U, Int: size of the unpaired region.
2.890429
3.011464
0.959809
assert pad_with in {"P", "H", "I", "M", "E"}

def read_profile(tmpdir, P):
    return [values.strip().split("\t")
            for seq_name, values in iter_fasta("{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P))]

def nelem(P, pad_with):
    # 1 for the profile we pad with, 0 otherwise
    return 1 if P == pad_with else 0

arr_hime = np.array([pad_sequences(read_profile(tmpdir, P),
                                   value=[nelem(P, pad_with)],
                                   align=seq_align,
                                   maxlen=maxlen)
                     for P in RNAplfold_PROFILES_EXECUTE], dtype="float32")
# add the pairness column
arr_p = 1 - arr_hime.sum(axis=0)[np.newaxis]
arr = np.concatenate((arr_p, arr_hime))
# reshape to: seq, seq_length, num_channels
arr = np.moveaxis(arr, 0, 2)
return arr
def read_RNAplfold(tmpdir, maxlen=None, seq_align="start", pad_with="E")
pad_with: with which secondary-structure state should we pad the sequence?
6.009991
5.961494
1.008135
# extend the tmpdir with uuid string to allow for parallel execution
tmpdir = tmpdir + "/" + str(uuid4()) + "/"
if not isinstance(seq_vec, list):
    seq_vec = seq_vec.tolist()
if not os.path.exists(tmpdir):
    os.makedirs(tmpdir)
fasta_path = tmpdir + "/input.fasta"
write_fasta(fasta_path, seq_vec)
run_RNAplfold(fasta_path, tmpdir, W=W, L=L, U=U)
# 1. split the fasta into pieces
# 2. run_RNAplfold for each of them
# 3. Read the results
return read_RNAplfold(tmpdir, maxlen, seq_align=seq_align, pad_with="E")
def encodeRNAStructure(seq_vec, maxlen=None, seq_align="start", W=240, L=160, U=1, tmpdir="/tmp/RNAplfold/")
Compute RNA secondary structure with RNAplfold implemented in Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832). # Note Secondary structure is represented as the probability to be in the following states: - `["Pairedness", "Hairpin loop", "Internal loop", "Multi loop", "External region"]` See Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832) for more information. # Arguments seq_vec: list of DNA/RNA sequences maxlen: Maximum sequence length. See `concise.preprocessing.pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `concise.preprocessing.pad_sequences` for more detail W: Int; span - window length L: Int; maximum span U: Int; size of unpaired region tmpdir: Where to store the intermediary files of RNAplfold. # Note Recommended parameters: - for human, mouse use W, L, u : 240, 160, 1 - for fly, yeast use W, L, u : 80, 40, 1 # Returns np.ndarray of shape `(len(seq_vec), maxlen, 5)`
3.372592
3.4988
0.963928
def _format_keras_history(history):
    return {"params": history.params,
            "loss": merge_dicts({"epoch": history.epoch}, history.history)}

if use_weight:
    sample_weight = train[2]
else:
    sample_weight = None
# train the model
logger.info("Fit...")
history = History()
model.fit(train[0], train[1],
          batch_size=batch_size,
          validation_data=valid[:2],
          epochs=epochs,
          sample_weight=sample_weight,
          verbose=2,
          callbacks=[history] + callbacks)
# get history
hist = _format_keras_history(history)
# load and eval the best model
if eval_best:
    mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)]
    assert len(mcp) == 1
    model = load_model(mcp[0].filepath)
return eval_model(model, valid, add_eval_metrics), hist
def _train_and_eval_single(train, valid, model, batch_size=32, epochs=300, use_weight=False, callbacks=[], eval_best=False, add_eval_metrics={})
Fit and evaluate a keras model eval_best: if True, load the checkpointed model for evaluation
3.42908
3.513564
0.975955
# evaluate the model
logger.info("Evaluate...")
# - model_metrics
model_metrics_values = model.evaluate(test[0], test[1],
                                      verbose=0,
                                      batch_size=test[1].shape[0])
# evaluation is done in a single pass to have more precise metrics
model_metrics = dict(zip(_listify(model.metrics_names),
                         _listify(model_metrics_values)))
# - eval_metrics
y_true = test[1]
y_pred = model.predict(test[0], verbose=0)
eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()}
# handle the case where the two metrics names intersect
# - omit duplicates from eval_metrics
intersected_keys = set(model_metrics).intersection(set(eval_metrics))
if len(intersected_keys) > 0:
    logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones".format(intersected_keys))
    eval_metrics = _delete_keys(eval_metrics, intersected_keys)
return merge_dicts(model_metrics, eval_metrics)
def eval_model(model, test, add_eval_metrics={})
Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `concise.eval_metrics` module. # Returns dictionary with evaluation metrics
3.538723
3.59899
0.983254
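A usage sketch with a toy model; `concise.eval_metrics.auprc` is assumed to exist based on the docstring's reference to that module:

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from concise.eval_metrics import auprc  # assumed per the docstring

model = Sequential([Dense(1, activation="sigmoid", input_shape=(5,))])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["acc"])
x, y = np.random.rand(32, 5), np.random.randint(0, 2, (32, 1))
eval_model(model, (x, y), add_eval_metrics={"auprc": auprc})
# -> {'loss': ..., 'acc': ..., 'auprc': ...}
```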
model_param = merge_dicts({"train_data": train_data}, param["model"], param.get("shared", {})) return model_fn(**model_param)
def get_model(model_fn, train_data, param)
Feed model_fn with train_data and param
4.892914
4.849086
1.009038
c = deepcopy(dct) assert isinstance(keys, list) for k in keys: c.pop(k) return c
def _delete_keys(dct, keys)
Returns a copy of dct without `keys` keys
4.11007
3.451814
1.190699
return {k: np.array([d[k] for d in dict_list]).mean() for k in dict_list[0].keys()}
def _mean_dict(dict_list)
Compute the mean value across a list of dictionaries
2.485774
2.24716
1.106185
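A behaviour sketch of the helper above:

```python
_mean_dict([{"acc": 0.5, "loss": 1.0},
            {"acc": 1.0, "loss": 3.0}])
# -> {'acc': 0.75, 'loss': 2.0}
```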
lid = np.where(np.array(self.tids) == tid)[0][0] return self.trials[lid]
def get_trial(self, tid)
Retrieve trial by tid
3.999995
3.747064
1.067501
if self.kill_timeout is not None: self.delete_running(self.kill_timeout) return super(CMongoTrials, self).count_by_state_unsynced(arg)
def count_by_state_unsynced(self, arg)
Extends the original object in order to inject checking for stalled jobs and killing them if they are running for too long
6.642394
4.993965
1.330084
running_all = self.handle.jobs_running()
running_timeout = [job for job in running_all
                   if coarse_utcnow() > job["refresh_time"] + timedelta(seconds=timeout_last_refresh)]
if len(running_timeout) == 0:
    # Nothing to stop
    self.refresh_tids(None)
    return None
if dry_run:
    logger.warning("Dry run. Not removing anything.")
logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ".
            format(len(running_timeout), len(running_all), len(self)))
now = coarse_utcnow()
logger.info("Current utc time: {0}".format(now))
logger.info("Time horizon: {0}".format(now - timedelta(seconds=timeout_last_refresh)))
for job in running_timeout:
    logger.info("Removing job: ")
    pjob = job.to_dict()
    del pjob["misc"]  # ignore misc when printing
    logger.info(pprint.pformat(pjob))
    if not dry_run:
        self.handle.delete(job)
        logger.info("Job deleted")
self.refresh_tids(None)
def delete_running(self, timeout_last_refresh=0, dry_run=False)
Delete jobs stalled in the running state for too long. timeout_last_refresh, int: number of seconds
3.923043
3.848861
1.019274
def result2history(result):
    if isinstance(result["history"], list):
        return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i)
                          for i, hist in enumerate(result["history"])])
    else:
        return pd.DataFrame(result["history"]["loss"])

# use all
if tid is None:
    tid = self.valid_tid()
res = [result2history(t["result"]).assign(tid=t["tid"])
       for t in self.trials if t["tid"] in _listify(tid)]
df = pd.concat(res)
# reorder columns
fold_name = ["fold"] if "fold" in df else []
df = _put_first(df, ["tid"] + fold_name + ["epoch"])
return df
def train_history(self, tid=None)
Get train history as pd.DataFrame
4.483337
4.308535
1.040571
def add_eval(res):
    if "eval" not in res:
        if isinstance(res["history"], list):
            # take the average across all folds
            eval_names = list(res["history"][0]["loss"].keys())
            eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()]
                                     for hist in res["history"]]).mean(axis=0).tolist()
            res["eval"] = {eval_names[i]: eval_metrics[i]
                           for i in range(len(eval_metrics))}
        else:
            res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()}
    return res

def add_n_epoch(df):
    df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index()
    df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True)
    return pd.merge(df, df_epoch, on="tid", how="left")

results = self.get_ok_results(verbose=verbose)
rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results]
df = pd.DataFrame.from_records(rp)
df = add_n_epoch(df)
first = ["tid", "loss", "status"]
return _put_first(df, first)
def as_df(self, ignore_vals=["history"], separator=".", verbose=True)
Return a pd.DataFrame view of the whole experiment
3.262137
3.24502
1.005275
assert isinstance(methods, list) if isinstance(extra_args, list): assert(len(extra_args) == len(methods)) else: extra_args = [None] * len(methods) main_args = {"model": model, "ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc, "mutation_positions": mutation_positions, "out_annotation_all_outputs": out_annotation_all_outputs} pred_results = {} for method, xargs in zip(methods, extra_args): if xargs is not None: if isinstance(xargs, dict): for k in argv: if k not in xargs: xargs[k] = argv[k] else: xargs = argv for k in main_args: xargs[k] = main_args[k] res = method(**xargs) pred_results[method.__name__] = res return pred_results
def effect_from_model(model, ref, ref_rc, alt, alt_rc, methods, mutation_positions, out_annotation_all_outputs, extra_args=None, **argv)
Convenience function to execute multiple effect predictions in one call # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the same function more often than once (even with different parameters) will overwrite the results of the previous calculation of that function. mutation_positions: Position on which the mutation was placed in the forward sequences out_annotation_all_outputs: Output labels of the model. extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries with additional arguments that should be passed on to the respective functions in 'methods'. Arguments defined here will overwrite arguments that are passed to all methods. **argv: Additional arguments to be passed on to all methods, e.g,: out_annotation. # Returns Dictionary containing the results of the individual calculations, the keys are the names of the executed functions
2.039656
1.943306
1.049581
market = Market(market, bitshares_instance=ctx.bitshares) t = [["time", "quote", "base", "price"]] for trade in market.trades(limit, start=start, stop=stop): t.append( [ str(trade["time"]), str(trade["quote"]), str(trade["base"]), "{:f} {}/{}".format( trade["price"], trade["base"]["asset"]["symbol"], trade["quote"]["asset"]["symbol"], ), ] ) print_table(t)
def trades(ctx, market, limit, start, stop)
List trades in a market
2.723399
2.663006
1.022679
market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
def ticker(ctx, market)
Show ticker of a market
3.469691
3.166592
1.095718
print_tx(ctx.bitshares.cancel(orders, account=account))
def cancel(ctx, orders, account)
Cancel one or multiple orders
12.983081
12.503759
1.038334
market = Market(market, bitshares_instance=ctx.bitshares) orderbook = market.orderbook() ta = {} ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["bids"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["bids"].append( [ str(order["quote"]), str(cumsumquote), str(order["base"]), str(cumsumbase), "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), ] ) ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["asks"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["asks"].append( [ "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), str(order["base"]), str(cumsumbase), str(order["quote"]), str(cumsumquote), ] ) t = [["bids", "asks"]] t.append([format_table(ta["bids"]), format_table(ta["asks"])]) print_table(t)
def orderbook(ctx, market)
Show the orderbook of a particular market
1.940172
1.920292
1.010352
amount = Amount(buy_amount, buy_asset) price = Price( price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares ) print_tx( price.market.buy(price, amount, account=account, expiration=order_expiration) )
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account)
Buy a specific asset at a certain rate against a base asset
3.951354
3.737893
1.057107
account = Account( account or config["default_account"], bitshares_instance=ctx.bitshares ) t = [["Price", "Quote", "Base", "ID"]] for o in account.openorders: t.append( [ "{:f} {}/{}".format( o["price"], o["base"]["asset"]["symbol"], o["quote"]["asset"]["symbol"], ), str(o["quote"]), str(o["base"]), o["id"], ] ) print_table(t)
def openorders(ctx, account)
List open orders of an account
3.44863
3.335957
1.033775
market = Market(market) ctx.bitshares.bundle = True market.cancel([x["id"] for x in market.accountopenorders(account)], account=account) print_tx(ctx.bitshares.txbuffer.broadcast())
def cancelall(ctx, market, account)
Cancel all orders of an account in a market
9.994492
9.277437
1.07729
from tqdm import tqdm from numpy import linspace market = Market(market) ctx.bitshares.bundle = True if min < max: space = linspace(min, max, num) else: space = linspace(max, min, num) func = getattr(market, side) for p in tqdm(space): func(p, total / float(num), account=account, expiration=order_expiration) print_tx(ctx.bitshares.txbuffer.broadcast())
def spread(ctx, market, side, min, max, num, total, order_expiration, account)
Place multiple orders \b :param str market: Market pair quote:base (e.g. USD:BTS) :param str side: ``buy`` or ``sell`` quote :param float min: minimum price to place order at :param float max: maximum price to place order at :param int num: Number of orders to place :param float total: Total amount of quote to use for all orders :param int order_expiration: Number of seconds until the order expires from the books
4.840821
5.377802
0.900149
from bitshares.dex import Dex dex = Dex(bitshares_instance=ctx.bitshares) print_tx( dex.borrow(Amount(amount, symbol), collateral_ratio=ratio, account=account) )
def borrow(ctx, amount, symbol, ratio, account)
Borrow a bitasset/market-pegged asset
5.740884
5.839172
0.983167
from bitshares.dex import Dex dex = Dex(bitshares_instance=ctx.bitshares) print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
def updateratio(ctx, symbol, ratio, account)
Update the collateral ratio of a call position
5.639513
5.721285
0.985708
print_tx(ctx.bitshares.fund_fee_pool(symbol, amount, account=account))
def fundfeepool(ctx, symbol, amount, account)
Fund the fee pool of an asset
6.966337
7.454614
0.9345
print_tx( ctx.bitshares.bid_collateral( Amount(collateral_amount, collateral_symbol), Amount(debt_amount, debt_symbol), account=account, ) )
def bidcollateral( ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account )
Bid for collateral in the settlement fund
3.682592
3.327342
1.106767
print_tx(ctx.bitshares.asset_settle(Amount(amount, symbol), account=account))
def settle(ctx, symbol, amount, account)
Settle a bitasset/market-pegged asset
11.748738
11.784007
0.997007
if not isinstance(type, (list, tuple)):
    type = [type]
account = Account(account, full=True)
ret = {key: list() for key in Vote.types()}
for vote in account["votes"]:
    t = Vote.vote_type_from_id(vote["id"])
    ret[t].append(vote)
if "committee" in type:
    t = [["id", "url", "account", "votes"]]
    for vote in ret["committee"]:
        t.append([
            vote["id"],
            vote["url"],
            Account(vote["committee_member_account"])["name"],
            str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
        ])
    print_table(t)
if "witness" in type:
    t = [[
        "id", "account", "url", "votes",
        "last_confirmed_block_num", "total_missed", "vesting",
    ]]
    for vote in ret["witness"]:
        t.append([
            vote["id"],
            Account(vote["witness_account"])["name"],
            vote["url"],
            str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
            vote["last_confirmed_block_num"],
            vote["total_missed"],
            str(Vesting(vote.get("pay_vb")).claimable) if vote.get("pay_vb") else "",
        ])
    print_table(t)
if "worker" in type:
    t = [["id", "name/url", "daily_pay", "votes", "time", "account"]]
    for vote in ret["worker"]:
        votes = Amount({"amount": vote["total_votes_for"], "asset_id": "1.3.0"})
        amount = Amount({"amount": vote["daily_pay"], "asset_id": "1.3.0"})
        t.append([
            vote["id"],
            "{name}\n{url}".format(**vote),
            str(amount),
            str(votes),
            "{work_begin_date}\n-\n{work_end_date}".format(**vote),
            str(Account(vote["worker_account"])["name"]),
        ])
    print_table(t)
def votes(ctx, account, type)
List the votes of an account
2.392692
2.437411
0.981653
if not objects:
    t = [["Key", "Value"]]
    info = ctx.bitshares.rpc.get_dynamic_global_properties()
    for key in info:
        t.append([key, info[key]])
    print_table(t)
for obj in objects:
    # Block
    if re.match(r"^[0-9]*$", obj):
        block = Block(obj, lazy=False, bitshares_instance=ctx.bitshares)
        t = [["Key", "Value"]]
        for key in sorted(block):
            value = block[key]
            if key == "transactions":
                value = format_tx(value)
            t.append([key, value])
        print_table(t)
    # Object Id
    elif re.match(r"^\d*\.\d*\.\d*$", obj):
        data = ctx.bitshares.rpc.get_object(obj)
        if data:
            t = [["Key", "Value"]]
            for key in sorted(data):
                value = data[key]
                if isinstance(value, dict) or isinstance(value, list):
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        else:
            print_message("Object %s unknown" % obj, "warning")
    # Asset
    elif obj.upper() == obj and re.match(r"^[A-Z\.]*$", obj):
        data = Asset(obj)
        t = [["Key", "Value"]]
        for key in sorted(data):
            value = data[key]
            if isinstance(value, dict):
                value = format_tx(value)
            t.append([key, value])
        print_table(t)
    # Public Key
    elif re.match(r"^BTS.{48,55}$", obj):
        account = ctx.bitshares.wallet.getAccountFromPublicKey(obj)
        if account:
            t = [["Account"]]
            t.append([account])
            print_table(t)
        else:
            print_message("Public Key not known: %s" % obj, "warning")
    # Account name
    elif re.match(r"^[a-zA-Z0-9\-\._]{2,64}$", obj):
        account = Account(obj, full=True)
        if account:
            t = [["Key", "Value"]]
            for key in sorted(account):
                value = account[key]
                if isinstance(value, dict) or isinstance(value, list):
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        else:
            print_message("Account %s unknown" % obj, "warning")
    elif ":" in obj:
        vote = ctx.bitshares.rpc.lookup_vote_ids([obj])[0]
        if vote:
            t = [["Key", "Value"]]
            for key in sorted(vote):
                value = vote[key]
                if isinstance(value, dict) or isinstance(value, list):
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        else:
            print_message("voteid %s unknown" % obj, "warning")
    else:
        print_message("Couldn't identify object to read", "warning")
def info(ctx, objects)
Obtain all kinds of information
2.058872
2.060214
0.999349
from bitsharesbase.operationids import getOperationNameForId from bitshares.market import Market market = Market("%s:%s" % (currency, "BTS")) ticker = market.ticker() if "quoteSettlement_price" in ticker: price = ticker.get("quoteSettlement_price") else: price = ticker.get("latest", 0) price.invert() chain = Blockchain(bitshares_instance=ctx.bitshares) feesObj = chain.chainParameters().get("current_fees") fees = feesObj["parameters"] t = [["Operation", "Type", "Fee", currency]] for fee in fees: for f in fee[1]: t.append( [ highlight(getOperationNameForId(fee[0])), detail(f), detail( str(Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"})) ), detail( str( price * Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"}) ) ), ] ) print_table(t)
def fees(ctx, currency)
List fees
4.100859
4.078627
1.005451
ctx.blockchain.blocking = True tx = ctx.blockchain.htlc_create( Amount(amount, symbol), to, secret, hash_type=hash, expiration=expiration, account=account, ) tx.pop("trx", None) print_tx(tx) results = tx.get("operation_results", {}) if results: htlc_id = results[0][1] print("Your htlc_id is: {}".format(htlc_id))
def create(ctx, to, amount, symbol, secret, hash, account, expiration)
Create an HTLC contract
4.636495
4.320318
1.073184
print_tx(ctx.blockchain.htlc_redeem(htlc_id, secret, account=account))
def redeem(ctx, htlc_id, secret, account)
Redeem an HTLC contract
5.576936
5.473253
1.018944
if not is_flags_class_final(flags_class): raise TypeError('unique check can be applied only to flags classes that have members') if not flags_class.__member_aliases__: return flags_class aliases = ', '.join('%s -> %s' % (alias, name) for alias, name in flags_class.__member_aliases__.items()) raise ValueError('duplicate values found in %r: %s' % (flags_class, aliases))
def unique(flags_class)
A decorator for flags classes to forbid flag aliases.
4.091512
3.85332
1.061815
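A hedged usage sketch, assuming the py-flags style `Flags` base class (`from flags import Flags`); the duplicate value makes `crimson` an alias of `red`, which triggers the ValueError raised above at class-decoration time:

from flags import Flags, unique

@unique
class Color(Flags):
    red = 1
    green = 2
    crimson = 1   # same bits as `red` -> alias -> ValueError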
flags_class = unique(flags_class)
other_bits = 0
for name, member in flags_class.__members_without_aliases__.items():
    bits = int(member)
    if other_bits & bits:
        for other_name, other_member in flags_class.__members_without_aliases__.items():
            if int(other_member) & bits:
                raise ValueError("%r: '%s' and '%s' have overlapping bits"
                                 % (flags_class, other_name, name))
    else:
        other_bits |= bits
return flags_class  # a decorator has to hand back the class it validated
def unique_bits(flags_class)
A decorator for flags classes to forbid declaring flags with overlapping bits.
3.289966
2.857773
1.151234
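A matching usage sketch under the same py-flags assumption; `read_write` shares bits with both existing members, so the decorator raises:

from flags import Flags, unique_bits

@unique_bits
class Access(Flags):
    read = 1
    write = 2
    read_write = 3   # overlaps `read` and `write` -> ValueError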
if isinstance(members, str):
    members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
elif isinstance(members, (tuple, list, collections.abc.Set)):
    # collections.abc: the bare collections.Set/Mapping aliases were
    # removed in Python 3.10
    if members and isinstance(next(iter(members)), str):
        members = ((name, UNDEFINED) for name in members)
elif isinstance(members, collections.abc.Mapping):
    members = members.items()
return members
def process_inline_members_definition(members)
:param members: this can be any of the following:

- a string containing a space and/or comma separated list of names,
  e.g.: "item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
- a tuple/list/Set of strings (names)
- a Mapping of (name, data) pairs
- any kind of iterable that yields (name, data) pairs

:return: An iterable of (name, data) pairs.
3.0011
2.752749
1.090219
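Illustrative calls, assuming `UNDEFINED` is the module's "no data attached" sentinel; each input shape is normalized to (name, data) pairs:

list(process_inline_members_definition("red, green blue"))
# -> [('red', UNDEFINED), ('green', UNDEFINED), ('blue', UNDEFINED)]
list(process_inline_members_definition({'red': 1, 'green': 2}))
# -> [('red', 1), ('green', 2)]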
members = []
auto_flags = []
all_bits = 0
for name, data in member_definitions:
    bits, data = cls.flag_attribute_value_to_bits_and_data(name, data)
    if bits is UNDEFINED:
        auto_flags.append(len(members))
        members.append((name, data))
    elif is_valid_bits_value(bits):
        all_bits |= bits
        members.append((name, bits, data))
    else:
        raise TypeError("Expected an int value as the bits of flag '%s', "
                        "received %r" % (name, bits))

# auto-assigning unused bits to members without custom defined bits
bit = 1
for index in auto_flags:
    while bit & all_bits:
        bit <<= 1
    name, data = members[index]
    members[index] = name, bit, data
    bit <<= 1
return members
def process_member_definitions(cls, member_definitions)
The incoming member_definitions contains the class attributes (with their
values) that are used to define the flag members. This method can do anything
to the incoming list and has to return a final set of flag definitions that
assigns bits to the members. The returned member definitions can be completely
different or unrelated to the incoming ones.

:param member_definitions: A list of (name, data) tuples.
:return: An iterable of iterables yielding 3 items: name, bits, data.
4.367011
3.766988
1.159284
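A standalone sketch of the auto-assignment pass above (the helper name and simplified tuples are illustrative): members without explicit bits receive the lowest bits not claimed by explicitly defined ones, so an explicit 4 is skipped over:

UNDEFINED = object()

def assign_bits(definitions):
    members, auto_flags, all_bits = [], [], 0
    for name, bits in definitions:
        if bits is UNDEFINED:
            auto_flags.append(len(members))
            members.append([name, None])
        else:
            all_bits |= bits
            members.append([name, bits])
    bit = 1
    for index in auto_flags:
        while bit & all_bits:     # skip bits taken by explicit members
            bit <<= 1
        members[index][1] = bit
        bit <<= 1
    return members

print(assign_bits([("a", UNDEFINED), ("b", 4), ("c", UNDEFINED)]))
# -> [['a', 1], ['b', 4], ['c', 2]]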
if not isinstance(s, str):
    raise TypeError("Expected a str instance, received %r" % (s,))
return cls(cls.bits_from_simple_str(s))
def from_simple_str(cls, s)
Accepts only the output of to_simple_str(). The output of __str__() is invalid as input.
4.403147
4.151029
1.060736
if not isinstance(s, str):
    raise TypeError("Expected a str instance, received %r" % (s,))
return cls(cls.bits_from_str(s))
def from_str(cls, s)
Accepts both the output of to_simple_str() and __str__().
4.721707
4.495699
1.050272
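Illustrative round trips, assuming a py-flags style class with a `to_simple_str()` method as referenced in the docstrings above:

from flags import Flags

class Color(Flags):
    red = 1
    green = 2
    blue = 4

value = Color.red | Color.blue
simple = value.to_simple_str()
assert Color.from_simple_str(simple) == value
assert Color.from_str(simple) == value       # from_str accepts both forms
assert Color.from_str(str(value)) == value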
try:
    if len(s) <= len(cls.__name__) or not s.startswith(cls.__name__):
        return cls.bits_from_simple_str(s)
    c = s[len(cls.__name__)]
    if c == '(':
        if not s.endswith(')'):
            raise ValueError
        return cls.bits_from_simple_str(s[len(cls.__name__) + 1:-1])
    elif c == '.':
        member_name = s[len(cls.__name__) + 1:]
        return int(cls.__all_members__[member_name])
    else:
        raise ValueError
except ValueError as ex:
    if ex.args:
        raise
    raise ValueError("%s.%s: invalid input: %r"
                     % (cls.__name__, cls.bits_from_str.__name__, s))
except KeyError as ex:
    raise ValueError("%s.%s: Invalid flag name '%s' in input: %r"
                     % (cls.__name__, cls.bits_from_str.__name__, ex.args[0], s))
def bits_from_str(cls, s)
Converts the output of __str__ into an integer.
2.585548
2.523479
1.024597
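The three accepted input shapes, sketched with the assumed `Color` class from the previous example:

Color.bits_from_str("red")          # no class-name prefix -> bits_from_simple_str
Color.bits_from_str("Color(red)")   # prefix stripped, inner part parsed as simple str
Color.bits_from_str("Color.red")    # single member looked up in __all_members__
Color.bits_from_str("Color.bogus")  # -> ValueError: Invalid flag name 'bogus' ...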
if cer:
    cer = Price(cer, quote=symbol, base="1.3.0", bitshares_instance=ctx.bitshares)
print_tx(
    ctx.bitshares.publish_price_feed(
        symbol, Price(price, market), cer=cer, mssr=mssr, mcr=mcr, account=account
    )
)
def newfeed(ctx, symbol, price, market, cer, mssr, mcr, account)
Publish a price feed!

Examples:

\b
    uptick newfeed USD 0.01 USD/BTS
    uptick newfeed USD 100 BTS/USD

Core Exchange Rate (CER)

\b
If no CER is provided, the cer will be the same as the settlement price
with a 5% premium (only if the 'market' is against the core asset
(e.g. BTS)). The CER is always defined against the core asset (BTS).
This means that if the backing asset is not the core asset (BTS), then
you must specify your own cer as a float. The float `x` will be
interpreted as `x BTS/SYMBOL`.
5.565116
5.441806
1.02266
import builtins

witnesses = Witnesses(bitshares_instance=ctx.bitshares)

def test_price(p, ref):
    if math.fabs(float(p / ref) - 1.0) > pricethreshold / 100.0:
        return click.style(str(p), fg="red")
    elif math.fabs(float(p / ref) - 1.0) > pricethreshold / 2.0 / 100.0:
        return click.style(str(p), fg="yellow")
    else:
        return click.style(str(p), fg="green")

def price_diff(p, ref):
    d = (float(p) - float(ref)) / float(ref) * 100
    if math.fabs(d) >= 5:
        color = "red"
    elif math.fabs(d) >= 2.5:
        color = "yellow"
    else:
        color = "green"
    return click.style("{:8.2f}%".format(d), fg=color)

def test_date(d):
    t = d.replace(tzinfo=None)
    now = datetime.utcnow()
    # check the tighter (fresher) threshold first; with the looser check
    # first, the yellow branch could never be reached
    if now < t + timedelta(minutes=maxage / 2.0):
        return click.style(str(t), fg="green")
    if now < t + timedelta(minutes=maxage):
        return click.style(str(t), fg="yellow")
    return click.style(str(t), fg="red")

output = ""
for asset in tqdm(assets):
    t = PrettyTable(
        [
            "Asset",
            "Producer",
            "Active Witness",
            "Date",
            "Settlement Price",
            "Core Exchange Price",
            "MCR",
            "SSPR",
            "delta",
        ]
    )
    t.align = "c"
    t.align["Producer"] = "l"
    asset = Asset(asset, full=True, bitshares_instance=ctx.bitshares)
    current_feed = asset.feed
    feeds = asset.feeds
    producingwitnesses = builtins.set()
    witness_accounts = [x["witness_account"] for x in witnesses]
    for feed in tqdm(feeds):
        producingwitnesses.add(feed["producer"]["id"])
        t.add_row(
            [
                asset["symbol"],
                feed["producer"]["name"],
                click.style(
                    "X" if feed["producer"]["id"] in witness_accounts else "",
                    bold=True,
                ),
                test_date(feed["date"]),
                test_price(
                    feed["settlement_price"], current_feed["settlement_price"]
                ),
                test_price(
                    feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                ),
                feed["maintenance_collateral_ratio"] / 10,
                feed["maximum_short_squeeze_ratio"] / 10,
                price_diff(
                    feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                ),
            ]
        )
    for missing in builtins.set(witness_accounts).difference(producingwitnesses):
        witness = Witness(missing)
        t.add_row(
            [
                click.style(asset["symbol"], bg="red"),
                click.style(witness.account["name"], bg="red"),
                # every id in this loop is an active witness account by
                # construction (the original re-used the stale `feed`
                # variable from the loop above here)
                click.style("X", bold=True),
                click.style(str(datetime(1970, 1, 1))),
                click.style("missing", bg="red"),
                click.style("missing", bg="red"),
                click.style("missing", bg="red"),
                click.style("missing", bg="red"),
                click.style("missing", bg="red"),
            ]
        )
    output += t.get_string(sortby="Date", reversesort=True)
    output += "\n"
click.echo(output)
def feeds(ctx, assets, pricethreshold, maxage)
Price Feed Overview
2.241378
2.246674
0.997643
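Worked example of the `price_diff` thresholds above: a feed value of 103 against a reference of 100 deviates by +3.00%, which falls in the yellow band (>= 2.5% and < 5%):

def band(d):
    return "red" if abs(d) >= 5 else "yellow" if abs(d) >= 2.5 else "green"

d = (103.0 - 100.0) / 100.0 * 100   # +3.00 %
assert band(d) == "yellow"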
t = format_table(*args, **kwargs)
click.echo(t)
def print_table(*args, **kwargs)
if csv:
    import csv
    t = csv.writer(sys.stdout, delimiter=";")
    t.writerow(header)
else:
    t = PrettyTable(header)
    t.align = "r"
    t.align["details"] = "l"
6.019273
6.05725
0.99373
try:
    data = list(eval(d) for d in arguments)
except Exception:
    data = arguments
ret = getattr(ctx.bitshares.rpc, call)(*data, api=api)
print_dict(ret)
def rpc(ctx, call, arguments, api)
Construct RPC call directly

\b
You can specify which API to send the call to:

    uptick rpc --api assets

You can also specify lists using:

    uptick rpc get_objects "['2.0.0', '2.1.0']"
5.627521
6.917074
0.81357
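Sketch of the argument handling above: each CLI argument is eval()'d when possible, so list/dict literals arrive as Python objects rather than raw strings:

arguments = ("['2.0.0', '2.1.0']",)
try:
    data = list(eval(d) for d in arguments)
except Exception:
    data = arguments
assert data == [['2.0.0', '2.1.0']]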
print_tx(ctx.bitshares.approvecommittee(members, account=account))
def approvecommittee(ctx, members, account)
Approve committee member(s)
13.48536
14.149859
0.953038
print_tx(ctx.bitshares.disapprovecommittee(members, account=account))
def disapprovecommittee(ctx, members, account)
Disapprove committee member(s)
11.604912
12.339385
0.940477