query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Sequence to_fasta() should return Fasta-format string | def test_to_fasta(self):
even = "TCAGAT"
odd = even + "AAA"
even_dna = self.SEQ(even, name="even")
odd_dna = self.SEQ(odd, name="odd")
self.assertEqual(even_dna.to_fasta(), ">even\nTCAGAT\n")
# set line wrap to small number so we can test that it works
self.assert... | [
"def toFasta(self):\n return \">{}\\n{}\\n\".format(self.name, self.seq)",
"def to_fasta(self):\n return Fasta(self.identifier, self.description, self.to_string())",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
correctly annotates a Sequence from a gff file | def test_annotate_from_gff(self):
from cogent3.parse.fasta import FastaParser
fasta_path = os.path.join("data/c_elegans_WS199_dna_shortened.fasta")
gff3_path = os.path.join("data/c_elegans_WS199_shortened_gff.gff3")
name, seq = next(FastaParser(fasta_path))
sequence = Sequence(... | [
"def annotateFromGff(self, f):\n for (name, source, feature, start, end, score,\n strand, frame, attributes, comments) in GffParser(f):\n if name in self.NamedSeqs:\n self.NamedSeqs[name].addFeature( feature, \n parse_attributes(a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence strip_degenerate should remove any degenerate bases | def test_strip_degenerate(self):
self.assertEqual(self.RNA("UCAG-").strip_degenerate(), "UCAG-")
self.assertEqual(self.RNA("NRYSW").strip_degenerate(), "")
self.assertEqual(self.RNA("USNG").strip_degenerate(), "UG") | [
"def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)",
"def degenerate2(s):\n from lasagna.utils import base_repr\n\n n = s.count('N')\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence strip_bad should remove any non-base, non-gap chars | def test_strip_bad(self):
# have to turn off check to get bad data in; no longer preserves case
self.assertEqual(
self.RNA("UCxxxAGwsnyrHBNzzzD-D", check=False).strip_bad(),
"UCAGWSNYRHBND-D",
)
self.assertEqual(self.RNA("@#^*($@!#&()!@QZX", check=False).strip_bad... | [
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RN... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence strip_bad_and_gaps should remove gaps and bad chars | def test_strip_bad_and_gaps(self):
# have to turn off check to get bad data in; no longer preserves case
self.assertEqual(
self.RNA("UxxCAGwsnyrHBNz#!D-D", check=False).strip_bad_and_gaps(),
"UCAGWSNYRHBNDD",
)
self.assertEqual(
self.RNA("@#^*($@!#&()!... | [
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")",
"def cleanga... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence shuffle should return new random sequence w/ same monomers | def test_shuffle(self):
r = self.RNA("UUUUCCCCAAAAGGGG")
s = r.shuffle()
self.assertNotEqual(r, s)
self.assertEqualItems(r, s) | [
"def test_shuffle(self):\n random.shuffle(self.seq)\n self.seq.sort()\n self.assertEqual(self.seq, range(10))",
"def shuffle(self):\n for i in xrange(self.n - 1):\n pos = random.randint(i, self.n - 1)\n self.to[i], self.to[pos] = self.to[pos], self.to[i]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence is_gap should return True if char is a valid gap char | def test_is_gap(self):
r = self.RNA("ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN")
for char in "qwertyuiopasdfghjklzxcvbnmQWERTYUIOASDFGHJKLZXCVBNM":
assert not r.is_gap(char)
assert r.is_gap("-")
# only works on a single literal that's a gap, not on a sequence.
# possibly,... | [
"def is_gap(cls, char):\n return char in cls.gap_alphabet()",
"def is_gap(self, char=None):\n if char is None: # no char - so test if self is all gaps\n return len(self) == self.count_gaps()\n else:\n return self.moltype.is_gap(char)",
"def find_gaps(s, gapcode=45):\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence is_degenerate should return True if degen symbol in seq | def test_is_degenerate(self):
assert not self.RNA("").is_degenerate()
assert not self.RNA("UACGCUACAUGuacgucaguGCUAGCUA---ACGUCAG").is_degenerate()
assert self.RNA("N").is_degenerate()
assert self.RNA("R").is_degenerate()
assert self.RNA("y").is_degenerate()
assert self.R... | [
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence is_strict should return True if all symbols in Monomers | def test_is_strict(self):
assert self.RNA("").is_strict()
assert self.PROT("A").is_strict()
assert self.RNA("UAGCACUgcaugcauGCAUGACuacguACAUG").is_strict()
assert not self.RNA("CAGUCGAUCA-cgaucagUCGAUGAC").is_strict() | [
"def is_atomic(self):\n \n symbols=set()\n for e in self.symbols:\n if not e=='':\n symbols.add(e)\n\n for s in symbols: #unicity first\n count=0\n for e in symbols:\n if s==e:\n count+=1\n if c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence first_gap should return index of first gap symbol, or None | def test_first_gap(self):
self.assertEqual(self.RNA("").first_gap(), None)
self.assertEqual(self.RNA("a").first_gap(), None)
self.assertEqual(self.RNA("uhacucHuhacUUhacan").first_gap(), None)
self.assertEqual(self.RNA("-abc").first_gap(), 0)
self.assertEqual(self.RNA("b-ac").firs... | [
"def first_gap(self):\n a = self.gap_indices()\n try:\n return a[0]\n except IndexError:\n return None",
"def find_symbol_first(sequence, symbol):\n match_i = find_symbol(sequence, symbol)\n if len(match_i) == 0:\n return -1\n return np.min(match_i)",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence first_degenerate should return index of first degen symbol | def test_first_degenerate(self):
self.assertEqual(self.RNA("").first_degenerate(), None)
self.assertEqual(self.RNA("a").first_degenerate(), None)
self.assertEqual(self.RNA("UCGACA--CU-gacucaguacgua").first_degenerate(), None)
self.assertEqual(self.RNA("nCAGU").first_degenerate(), 0)
... | [
"def first_bad_pair(sequence):\n for i in range(len(sequence)-1):\n if sequence[i] >= sequence[i+1]:\n return i\n return -1",
"def find_symbol_first(sequence, symbol):\n match_i = find_symbol(sequence, symbol)\n if len(match_i) == 0:\n return -1\n return np.min(match_i)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence first_non_strict should return index of first nonstrict symbol | def test_first_non_strict(self):
self.assertEqual(self.RNA("").first_non_strict(), None)
self.assertEqual(self.RNA("A").first_non_strict(), None)
self.assertEqual(self.RNA("ACGUACGUcgaucagu").first_non_strict(), None)
self.assertEqual(self.RNA("N").first_non_strict(), 0)
self.ass... | [
"def find_symbol_first(sequence, symbol):\n match_i = find_symbol(sequence, symbol)\n if len(match_i) == 0:\n return -1\n return np.min(match_i)",
"def getFirstSymbolLVA(self) -> int:\n ...",
"def second_index(text: str, symbol: str) -> [int, None]:\n # your code here\n idx = [m.sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence disambiguate should remove degenerate bases | def test_disambiguate(self):
self.assertEqual(self.RNA("").disambiguate(), "")
self.assertEqual(
self.RNA("AGCUGAUGUA--CAGU").disambiguate(), "AGCUGAUGUA--CAGU"
)
self.assertEqual(
self.RNA("AUn-yrs-wkmCGwmrNMWRKY").disambiguate("strip"), "AU--CG"
)
... | [
"def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)",
"def test_all_unambig_dna_codons(self):\n self.translate_all_codons(\"ATCGatcg\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence gap_indices should return correct gap positions | def test_gap_indices(self):
self.assertEqual(self.RNA("").gap_indices(), [])
self.assertEqual(self.RNA("ACUGUCAGUACGHSDKCUCDNNS").gap_indices(), [])
self.assertEqual(self.RNA("GUACGUACAKDC-SDHDSK").gap_indices(), [12])
self.assertEqual(self.RNA("-DSHUHDS").gap_indices(), [0])
sel... | [
"def gap_indices(self):\n return list(self.gap_array().nonzero()[0])",
"def gap_indices(self):\n return self.moltype.gap_indices(self)",
"def gap_maps(self):\n degapped_to_gapped = []\n gapped_to_degapped = []\n non_gap_count = 0\n for i, e in enumerate(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence gap_vector should return correct gap positions | def test_gap_vector(self):
def g(x):
return self.RNA(x).gap_vector()
self.assertEqual(g(""), [])
self.assertEqual(g("ACUGUCAGUACGHCSDKCCUCCDNCNS"), [False] * 27)
self.assertEqual(
g("GUACGUAACAKADC-SDAHADSAK"),
list(map(bool, list(map(int, "000000000... | [
"def gap_vector(self):\n return self.get_gapped_seq().gap_vector()",
"def gap_vector(self):\n return [self.is_gap(c) for c in self._sequence]",
"def gapVector(self):\n return self.getGappedSeq().gapVector()",
"def gap_vector(self):\n return list(map(bool, self.gap_array()))",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence gap_maps should return dicts mapping gapped/ungapped pos | def test_gap_maps(self):
empty = ""
no_gaps = "aaa"
all_gaps = "---"
start_gaps = "--abc"
end_gaps = "ab---"
mid_gaps = "--a--b-cd---"
def gm(x):
return self.RNA(x).gap_maps()
self.assertEqual(gm(empty), ({}, {}))
self.assertEqual(gm(... | [
"def gap_maps(self):\n degapped_to_gapped = []\n gapped_to_degapped = []\n non_gap_count = 0\n for i, e in enumerate(self):\n if self.is_gap(e):\n gapped_to_degapped.append(None)\n else:\n gapped_to_degapped.append(non_gap_count)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence count_degenerate should return correct degen base count | def test_count_degenerate(self):
self.assertEqual(self.RNA("").count_degenerate(), 0)
self.assertEqual(self.RNA("GACUGCAUGCAUCGUACGUCAGUACCGA").count_degenerate(), 0)
self.assertEqual(self.RNA("N").count_degenerate(), 1)
self.assertEqual(self.PROT("N").count_degenerate(), 0)
self... | [
"def count(seq):\n\treturn sum(1 for x in seq)",
"def n_neg(seq):\n seq = seq.upper()\n\n # Check for validity of sequence\n for aa in seq:\n if aa not in bd.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n # Count Es & Ds and return count\n return seq.count('... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence can_mispair should return True on any possible mispair | def test_can_mispair(self):
assert not self.RNA("").can_mispair("")
assert self.RNA("N").can_mispair("N")
assert self.RNA("R").can_mispair("Y")
assert self.RNA("N").can_mispair("r")
assert self.RNA("CGUACGCAN").can_mispair("NUHCHUACH")
assert self.RNA("U").can_mispair("C"... | [
"def is_logical_consequence(premises, conclusion): # function TT-Entails? in the book AIMA.\n pass\n # ======== YOUR CODE HERE ========",
"def ok_mm_primer(primer_seq, all_primer_seqs, primer_mm):\r\n for curr_pat in all_primer_seqs:\r\n if count_mismatches(primer_seq, curr_pat, primer_mm) <= pri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence must_pair should return True when no possible mispairs | def test_must_pair(self):
assert self.RNA("").must_pair("")
assert not self.RNA("N").must_pair("N")
assert not self.RNA("R").must_pair("Y")
assert not self.RNA("A").must_pair("A")
assert not self.RNA("CGUACGCAN").must_pair("NUGCGUACG")
assert not self.RNA("U").must_pair("... | [
"def is_pairing(dna_seq1, dna_seq2):\n if dna_seq1 == None and dna_seq2 == None:\n return True\n if dna_seq1 == None or dna_seq2 == None:\n return False\n if dna_seq1.value == 'T':\n if dna_seq2.value == 'A':\n return is_pairing(dna_seq1.rest,dna_seq2.rest)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence matrix_distance should look up distances from a matrix | def test_matrix_distance(self):
# note that the score matrix must contain 'diagonal' elements m[i][i]
# to avoid failure when the sequences match.
m = {"U": {"U": 0, "C": 1, "A": 5}, "C": {"C": 0, "A": 2, "G": 4}}
self.assertEqual(self.RNA("UUUCCC").matrix_distance("UCACGG", m), 14)
... | [
"def distance_matrix(sequences):\n\n if len(sequences) == 0:\n print(\"FATAL: No sequences found\")\n sys.exit(-1)\n else:\n print(\"Found %d sequences\" % len(sequences))\n \n print(\"Creating distance matrix start.\")\n dmx = PairwiseSimilarity(sequences)\n print(\"Distance ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence frac_diff should return difference between sequences | def test_frac_diff(self):
s1 = self.RNA("ACGU")
s2 = self.RNA("AACG")
s3 = self.RNA("GG")
s4 = self.RNA("A")
e = self.RNA("")
self.assertEqual(s1.frac_diff(e), 0)
self.assertEqual(s1.frac_diff(s2), 0.75)
self.assertEqual(s1.frac_diff(s3), 1)
self.a... | [
"def diff(seq):\n d = seq[1] - seq[0]\n for i in range(1, len(seq)):\n if not seq[i - 1] + d == seq[i]:\n return False\n return floatfrac(d)",
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence frac_same_gaps should return similarity in gap positions | def test_frac_same_gaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("GGGG")
s3 = self.RNA("----")
s4 = self.RNA("A-A-")
s5 = self.RNA("-G-G")
s6 = self.RNA("UU--")
s7 = self.RNA("-")
s8 = self.RNA("GGG")
e = self.RNA("")
self.assertEqual(s1.... | [
"def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence frac_diff_gaps should return difference in gap positions | def test_frac_diffGaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("GGGG")
s3 = self.RNA("----")
s4 = self.RNA("A-A-")
s5 = self.RNA("-G-G")
s6 = self.RNA("UU--")
s7 = self.RNA("-")
s8 = self.RNA("GGG")
e = self.RNA("")
self.assertEqual(s1.f... | [
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence frac_same_non_gaps should return similarities at nongaps | def test_frac_same_non_gaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("AGGG")
s3 = self.RNA("GGGG")
s4 = self.RNA("AG--GA-G")
s5 = self.RNA("CU--CU-C")
s6 = self.RNA("AC--GC-G")
s7 = self.RNA("--------")
s8 = self.RNA("AAAA----")
s9 = self.RNA("A-... | [
"def test_frac_same_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence frac_diff_non_gaps should return differences at nongaps | def test_frac_diffNonGaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("AGGG")
s3 = self.RNA("GGGG")
s4 = self.RNA("AG--GA-G")
s5 = self.RNA("CU--CU-C")
s6 = self.RNA("AC--GC-G")
s7 = self.RNA("--------")
s8 = self.RNA("AAAA----")
s9 = self.RNA("A-GG... | [
"def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence frac_similar should return the fraction similarity | def test_frac_similar(self):
transitions = dict.fromkeys(
[
("A", "A"),
("A", "G"),
("G", "A"),
("G", "G"),
("U", "U"),
("U", "C"),
("C", "U"),
("C", "C"),
]
... | [
"def frac_similar(self, other, similar_pairs):\n if not self or not other:\n return 0.0\n\n return for_seq(f=lambda x, y: (x, y) in similar_pairs, normalizer=per_shortest)(\n self, other\n )",
"def frac_similar(self, other, similar_pairs):\n if not self or not oth... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
with_termini_unknown should reset termini to unknown char | def test_with_termini_unknown(self):
s1 = self.RNA("-?--AC--?-")
s2 = self.RNA("AC")
self.assertEqual(s1.with_termini_unknown(), "????AC????")
self.assertEqual(s2.with_termini_unknown(), "AC") | [
"def test_term_chars_default(self, instrument):\n assert instrument.term_chars == b'\\r'",
"def test_term_chars_default(self, instrument):\n assert instrument.term_chars is None",
"def with_termini_unknown(self):\n gaps = self.gap_vector()\n first_nongap = last_nongap = None\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gap degen character should be treated consistently | def test_consistent_gap_degen_handling(self):
# the degen character '?' can be a gap, so when we strip either gaps or
# degen characters it should be gone too
raw_seq = "---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--"
raw_ungapped = re.sub("[-?]", "", raw_seq)
raw_no_a... | [
"def test_is_gap(self):\n r = self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\")\n for char in \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOASDFGHJKLZXCVBNM\":\n assert not r.is_gap(char)\n assert r.is_gap(\"-\")\n # only works on a single literal that's a gap, not on a sequence.\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DnaSequence should behave as expected | def test_DnaSequence(self):
x = DnaSequence("tcag")
# note: no longer preserves case
self.assertEqual(x, "TCAG")
x = DnaSequence("aaa") + DnaSequence("ccc")
# note: doesn't preserve case
self.assertEqual(x, "AAACCC")
assert x.moltype is DNA
self.assertRai... | [
"def test_ModelDnaCodonSequence(self):\n d = ArrayDnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(d), \"TTTCGT\")\n self.assertEqual(d._data, array([0, 28]))\n self.assertEqual(str(d.to_rna()), \"UUUCGU\")\n self.assertEqual(str(d.to_dna()), \"TTTCGT\")",
"def to_dna(self):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence to_fasta() should return Fasta-format string | def test_to_fasta(self):
even = "TCAGAT"
odd = even + "AAA"
even_dna = self.SequenceClass(even, name="even")
odd_dna = self.SequenceClass(odd, name="odd")
self.assertEqual(even_dna.to_fasta(), ">even\nTCAGAT\n")
# set line wrap to small number so we can test that it works... | [
"def toFasta(self):\n return \">{}\\n{}\\n\".format(self.name, self.seq)",
"def to_fasta(self):\n return Fasta(self.identifier, self.description, self.to_string())",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SEQ(even, name=\"even\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence to_phylip() should return one-line phylip string | def test_to_phylip(self):
s = self.SequenceClass("ACG", name="xyz")
self.assertEqual(s.to_phylip(), "xyz" + " " * 27 + "ACG") | [
"def oneliner2phylip(self, line):\n seqs = line.strip(\";\").split(',')\n label_seqs = zip(seqs[:-1:2],seqs[1::2])\n taxa_count = len(label_seqs)\n seq_length = len(label_seqs[0][1])\n alignment = \"%s %s\\n\" % (taxa_count, seq_length) # add header\n for taxa_name, seq in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gapped sequence nongaps() should return correct array | def test_nongaps(self):
sc = self.SequenceClass
self.assertEqual(sc("TC").nongaps(), array([1, 1]))
self.assertEqual(sc("T-").nongaps(), array([1, 0])) | [
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gapped sequence regap() should return correct sequence | def test_regap(self):
sc = self.SequenceClass
self.assertEqual(str(sc("TC").regap(sc("A---A-"))), "T---C-") | [
"def understand_return_sequence():\n model_1 = Sequential()\n model_1.add(GRU(input_dim=256, output_dim=256, return_sequences=True))\n model_1.compile(loss='mean_squared_error', optimizer='sgd')\n train_x = np.random.randn(100, 78, 256)\n train_y = np.random.randn(100, 78, 256)\n model_1.fit(train... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Regular sequence should convert to model sequence | def test_regular_to_model(self):
r = RNA.make_seq("AAA", name="x")
s = RNA.make_array_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | [
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_model_to_model(self):\n r = RNA.make_array_seq(\"AAA\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Model sequence should convert to regular sequence | def test_model_to_regular(self):
r = RNA.make_array_seq("AAA", name="x")
s = RNA.make_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | [
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_model_to_model(self):\n r = RNA.make_array_seq(\"AAA\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Regular sequence should convert to regular sequence | def test_regular_to_regular(self):
r = RNA.make_seq("AAA", name="x")
s = RNA.make_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | [
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_make_sequences(self):\n self.assertEqual(self.sequenc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Model sequence should convert to model sequence | def test_model_to_model(self):
r = RNA.make_array_seq("AAA", name="x")
s = RNA.make_array_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | [
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ArrayDnaCodonSequence should behave as expected | def test_ModelDnaCodonSequence(self):
d = ArrayDnaCodonSequence("UUUCGU")
self.assertEqual(str(d), "TTTCGT")
self.assertEqual(d._data, array([0, 28]))
self.assertEqual(str(d.to_rna()), "UUUCGU")
self.assertEqual(str(d.to_dna()), "TTTCGT") | [
"def test_ModelRnaCodonSequence(self):\n r = ArrayRnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(r), \"UUUCGU\")\n self.assertEqual(r._data, array([0, 28]))\n self.assertEqual(str(r.to_rna()), \"UUUCGU\")\n self.assertEqual(str(r.to_dna()), \"TTTCGT\")",
"def to_dna(self):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ArrayRnaCodonSequence should behave as expected | def test_ModelRnaCodonSequence(self):
r = ArrayRnaCodonSequence("UUUCGU")
self.assertEqual(str(r), "UUUCGU")
self.assertEqual(r._data, array([0, 28]))
self.assertEqual(str(r.to_rna()), "UUUCGU")
self.assertEqual(str(r.to_dna()), "TTTCGT") | [
"def test_ModelDnaCodonSequence(self):\n d = ArrayDnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(d), \"TTTCGT\")\n self.assertEqual(d._data, array([0, 28]))\n self.assertEqual(str(d.to_rna()), \"UUUCGU\")\n self.assertEqual(str(d.to_dna()), \"TTTCGT\")",
"def codons(self, f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ArraySequence distance should work with function of indices | def test_distance_indices(self):
s1 = self.RNA("AUGC")
s2 = self.RNA("AAGC")
def f(x, y):
if x == 2 or y == 2:
return 10
return 0
self.assertEqual(s1.distance(s2, f, use_indices=True), 20) | [
"def test_distance_array(self):\n s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0])\n assert(s1.distance([20, 25]) == sqrt(200))\n assert(s1.distance(array([20, 25])) == sqrt(200))",
"def distances(data, indices, element, Functions):\n result = [0 for _ in range(len(data))]\n for att... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sequence strip_bad_and_gaps should remove gaps and bad chars | def test_strip_bad_and_gaps(self):
# have to turn off check to get bad data in; no longer preserves case
r = self.RNA("ACG--GRN?")
self.assertEqual(r.strip_bad_and_gaps(), "ACGGRN")
r._data[0] = 99
self.assertEqual(r.strip_bad_and_gaps(), "CGGRN") | [
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RN... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
abseq array seq should count characters | def test_count_ab(self):
AB = get_moltype("ab")
seq = AB.make_array_seq("aaba-", alphabet=AB.alphabet.with_gap_motif())
c = seq.counts()
self.assertEqual(c.to_dict(), {"a": 3, "b": 1})
c = seq.counts(allow_gap=True)
self.assertEqual(c.to_dict(), {"a": 3, "b": 1, "-": 1}) | [
"def codingSeq(acc, seq, exon_list=None):\n\n for s in seq:\n seq = s.replace(' ', '')\n print(len(seq))\n if exon_list != None:\n coding_seq = ''\n for x in exon_list:\n print(x)\n start = x[0]\n end = x[1]\n coding_seq += ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads .refFiles within folder and returns Benchmark objects. | def load_from_folder(folder):
refs = []
for input_file in os.listdir(folder):
if input_file.endswith(".ref"):
refs.append(Reference(folder, input_file))
return refs | [
"def load(file_path):\n return PytestBenchmarkFile(file_path)",
"def init(self, benchmark):\n for path in self.files:\n if os.path.exists(os.path.join(self.path, path)):\n relroot, filename = os.path.split(path)\n benchmark.addInstance(self.path, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a filename for the diff image. | def __diff_filename(self):
diff_dir = os.path.join(self.__folder, Reference.DIFF_OUT)
if not os.path.exists(diff_dir):
os.makedirs(diff_dir)
return os.path.join(diff_dir, self.__name +'.jpg') | [
"def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)",
"def make_filename():\n\n import datetime\n now = datetime.datetime.now()\n timestamp = now.timestamp()\n timestamp_without_dot = str(timestamp).replace(\".\", \"\")\n fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns complete path to reference file. | def reffile(self):
return os.path.join(self.__folder, self.__name + '.ref') | [
"def file_reference(self):\n return self.__file_reference",
"def referenceFile(self):\n try:\n return _FileReference( cmds.referenceQuery( self, f=1) )\n except RuntimeError:\n None",
"def path(self) -> str:\n\t\treturn os.path.join(self.location, self.fname)",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads the positions where bugs were found. | def __load_bugs(self):
bugs = []
with open(self.reffile(), 'rb') as reffile:
reader = csv.reader(reffile, delimiter=';', quotechar='\n')
for line in reader:
bugs.append(tuple(map(int, line)))
return bugs | [
"def loadKnownBugs(cls):\n fH = None\n try:\n logger.info(\"Loading Known Failures from : \" + str(config.CliConfig().common.known_bugs_filename))\n if os.path.isfile(config.CliConfig().common.known_bugs_filename):\n with open(config.CliConfig().common.known_bugs_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the list contains a similar rect. | def __has_similar_rect(rect, rect_list):
for i in reversed(range(len(rect_list))):
if Reference.__is_similar(rect_list[i], rect):
del(rect_list[i])
return True
return False | [
"def __is_similar(rect, another):\n area1 = rect[2]*rect[3]\n area2 = another[2]*another[3]\n intersect_width = min(rect[0]+rect[2], another[0]+another[2]) - max(rect[0],another[0])\n if not intersect_width > 0:\n return False\n intersect_height = min(rect[1]+rect[3], a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the rects are of similar size and position. | def __is_similar(rect, another):
area1 = rect[2]*rect[3]
area2 = another[2]*another[3]
intersect_width = min(rect[0]+rect[2], another[0]+another[2]) - max(rect[0],another[0])
if not intersect_width > 0:
return False
intersect_height = min(rect[1]+rect[3], another[1]+a... | [
"def is_rect(self):\n\n #too little vertices\n if len(self) < 3 or len(self) > 4:\n return False\n\n return len(set(self.rows)) == 2 and len(set(self.cols)) == 2",
"def check_rect(df):\n if len(np.unique(df.DISPLAY_COORDS)) == 1:\n print(\"Consistency check: All files hav... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the obj on signal, or returns the signal if obj is None. | def build_or_passthrough(model, obj, signal):
return signal if obj is None else model.build(obj, signal) | [
"def __get__(self, instance, owner):\n if instance is None:\n return self\n\n signal = getattr(instance, self.name, None)\n\n if signal is None:\n signal = self.factory()\n setattr(instance, self.name, signal)\n\n return signal",
"def __get__(self, obj,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
bind methods to the shell | def bind(self, shell):
shell.render_prompt = types.MethodType(self.render_prompt.__func__, shell) | [
"def shell():\n open_shell()",
"def command():\n pass",
"def shell(self, shell):\n\n self._shell = shell",
"def bind(self):\n pass",
"def shell_command(cmd_name):\n def inner(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n func.trac_method = cmd_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simply returns the original position, default = [0,0,0]. Inputs: pose [x,y,theta] in [m,m,degrees]. Returns: pose [x,y,theta] in [m,m,degrees]. | def get_goal_pose(self,pose=[0,0,0]):
return pose | [
"def transform_pose(self, pose):\n return self.transform().apply_to_pose(pose)",
"def pose(self, pose=None):\n return self.__getset_prop(Type.FLOAT, \"pose\", pose)",
"def _pybullet_pose(pose):\n pos = pose[:3]\n rot = pose[3:]\n rot = np.hstack((rot[1:], [rot[0]])) # wxyz -> xyzw\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the range of physical values that should map onto the MLP | def set_physical_minmax(self, min, max):
# This allows you to set the min and the max of the quantity that you want the MLP to measure.
# Once you set this, you can pass in a physical number to get_mlp_value() and it will be mapped to an MLP value and returned
pass
# Maybe we s... | [
"def AdjustRange(self, p_float=..., p_float=...):\n ...",
"def pwm_range(self, rng):\n self.__pwmRange = rng",
"def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...",
"def setRange(self, minValue, maxValue, step=1.0):\n # For compatibility with... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the torque on the motor from the brakes | def get_motor_load_torque(self):
# Start with the brake normal
# change to 17deg (tan 17?)
# change to torque using the pitch of the thread on the ball screw
# (^ make sure to take friction into account)
# That should give us the torque acting on the motor. If this torque is grea... | [
"def get_motor_torques(self):\n return nu.array(self.status.motor_torque_desired,float)",
"def getTorque(self, *args):\n return _yarp.ITorqueControl_getTorque(self, *args)",
"def torque(self):\n pass",
"def mTorque(self):\n pass",
"def getRefTorque(self, *args):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all my daily_schedule | def get_my_schedules():
schedules = DailyScheduleModel.get_daily_schedules_by_user(g.user.get('id'))
user_schedules = daily_schedule_schema.dump(schedules, many=True)
return custom_response(user_schedules, 200) | [
"def get_schedules(self):\n return self.data['schedules'];",
"def get_schedules():\n path = config.get('schedule', 'paths', './schedule.json')\n with open(path) as schedule_file:\n return json.load(schedule_file)",
"def find_schedule():\n schedule = []\n for week in range(1, NUM_WEEKS + 1):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds field to document contents. Field value can be a list, where each item is added separately (i.e., the field is multivalued). | def __add_to_contents(self, field_name, field_value, field_type):
if type(field_value) is list:
for fv in field_value:
self.__add_to_contents(field_name, fv, field_type)
else:
if len(field_value) > 0: # ignore empty fields
self.contents.append({'f... | [
"def add_custom_field(field, listids):",
"def __add_to_contents(self, field_name, field_value, field_type):\n self.contents.append({'field_name': field_name,\n 'field_value': field_value,\n 'field_type': field_type})",
"def addField(field):",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test when geographic extent changes. | def test_log_update_geographic_extent(self):
log_count_init = LoggerHistory.objects.count()
original_geographic_extent = self.project.geographic_extent
self.project.geographic_extent = GEOSGeometry(
'{"type": "Polygon","coordinates":'
'[[[-0.505,51.682],[-0.53,51.327],'
... | [
"def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.low... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test when multiple model fields changes. | def test_log_update_multiple_fields(self):
log_count_init = LoggerHistory.objects.count()
original_isprivate = self.project.isprivate
original_islocked = self.project.islocked
self.project.isprivate = False
self.project.islocked = True
self.project.save()
log_cou... | [
"def assert_changed(company, *fields):\n for field in fields:\n assert getattr(company, field) != getattr(company, f'_prev_{field}')",
"def test_fields_updated_with_computed(self):\n pass",
"def assert_did_not_change(company, *fields):\n for field in fields:\n assert getattr(company, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
kwargs: additional keyword arguments are copied and the copy is passed up to AbstractSimplexParameterType; see documentation for that class for details | def __init__(self, **kwargs):
kwc=kwargs.copy()
AbstractSimplexParameterType.__init__(self, **kwc) | [
"def set_params(self, **kwargs):\n # this is an ugly hack to approximate the right settings when copying the element\n self.current_config = kwargs\n # element disable is a construct used for this container only\n if self._sklearn_disabled in kwargs:\n self.disabled = kwargs[s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ParameterType for Quantities (float, int, etc). value_encoding: the intrinsic type of the Quantity. kwargs: additional keyword arguments are copied and the copy is passed up to AbstractSimplexParameterType; see documentation for that class for details | def __init__(self, value_encoding=None, uom=None, constraint=None, **kwargs):
kwc=kwargs.copy()
AbstractSimplexParameterType.__init__(self, value_class='NumericValue', **kwc)
if value_encoding is None:
self._value_encoding = np.dtype('float32').str
else:
try:
... | [
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def qint1(scale):\n return create_quantized_dtype(_builtin_quant_dtypes[\"qint1\"], scale, None)",
"def quantize(self, inputs, dtype):\n if not isinstance(dtype, (types.Fixed, types... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
kwargs: additional keyword arguments are copied and the copy is passed up to AbstractSimplexParameterType; see documentation for that class for details | def __init__(self, **kwargs):
kwc=kwargs.copy()
AbstractSimplexParameterType.__init__(self, **kwc) | [
"def set_params(self, **kwargs):\n # this is an ugly hack to approximate the right settings when copying the element\n self.current_config = kwargs\n # element disable is a construct used for this container only\n if self._sklearn_disabled in kwargs:\n self.disabled = kwargs[s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an order, updates the order with prevailing tax rules onto the order's credit attribute. Then it returns | def apply_tax(order_obj):
tax_rule = taxes.get()
all_credits = order_obj.credits
other_credit = filter(lambda x: x["coll_name"] != taxes.TaxRule.coll_name(), all_credits)
if tax_rule is not None:
order_obj.credits = other_credit + [{
"obj_id": tax... | [
"def update_order_pricing(order, commit=True):\n original_pricing = get_pricing_from_order(order)\n new_pricing = calculate_order_pricing(order)\n\n # avoid an update if the pricing hasn't changed\n if original_pricing == new_pricing:\n return\n\n order.net_cost = new_pricing.net_cost\n ord... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an order, updates the order with prevailing discount rules onto the order's debit attribute | def apply_discounts(order_obj):
all_dedits = order_obj.debits
other_debit = filter(lambda x: x["coll_name"] != discounts.Discount.coll_name(), all_dedits)
all_discounts = discounts.get_all()
valid_discounts = []
for item_dic in order_obj.items:
for d in all_discounts:
item_obj = ... | [
"def set_discount(self, discount):\n ...",
"def set_discount_rate(self, order_value, discount_rate):\r\n if float(discount_rate) == 0:\r\n self.discount_rate = float(RetailCustomer.discount_rate)\r\n else:\r\n self.discount_rate = float(discount_rate)",
"def apply_disc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a pretrained VGG19 model that outputs image features extracted at the third block of the model | def build_vgg(self):
vgg = VGG19(weights="imagenet")
# Set outputs to outputs of last conv. layer in block 3
# See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
vgg.outputs = [vgg.layers[9].output]
img = Input(shape=self.hr_shape)
... | [
"def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model",
"def build_model... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns given vm's/template's disks collection href or list of disk objects | def getObjDisks(name, get_href=True, is_template=False):
response = get_disk_attachments(
name, 'template' if is_template else 'vm', get_href
)
if get_href:
return response
return get_disk_list_from_disk_attachments(response) | [
"def list_vdisks(client, resource_group_name, vm_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n return virtual_machine.disks",
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a Disk object from a disk attached to a vm | def getVmDisk(vmName, alias=None, disk_id=None):
value = None
if disk_id:
prop = "id"
value = disk_id
elif alias:
prop = "name"
value = alias
else:
logger.error("No disk identifier or name was provided")
return None
return get_disk_obj_from_disk_attach... | [
"def show_vdisk(client, resource_group_name, vm_name, disk_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n for disk in virtual_machine.disks:\n if disk.virtual_disk_name == disk_name:\n return disk\n return None",
"def get_disk_obj_from_disk_attachment(disk_attachm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns disk from template collection | def getTemplateDisk(template_name, alias):
template_disks = getObjDisks(
template_name, get_href=False, is_template=True
)
for template_disk in template_disks:
if alias == template_disk.get_alias():
return template_disk
raise EntityNotFound(
"Didn't find disk %s for t... | [
"def storage_file(collection=\"contact\", ext=\"json\"):\n file_dir = Path(environ[\"STORAGE_DIR\"]) / Path(collection)\n file_dir.mkdir(exist_ok=True, parents=True)\n file = file_dir / f\"{time()}.{ext}\"\n return file",
"def download_template(self):\n return self._pj([self.classpath, 'templa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns disk object from disks' collection __author__ = "ratamir" | def get_disk_obj(disk_alias, attribute='name'):
return DISKS_API.find(disk_alias, attribute=attribute) | [
"def GetDisk(self, disk_name: str) -> 'GoogleComputeDisk':\n\n for disk in self.GetValue('disks'):\n if disk['source'].split('/')[-1] == disk_name:\n return GoogleCloudCompute(self.project_id).GetDisk(disk_name=disk_name)\n raise errors.ResourceNotFoundError(\n 'Disk {0:s} was not found in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare or update disk object according to its kwargs __author__ = jlibosva | def _prepareDiskObject(**kwargs):
storage_domain_name = kwargs.pop('storagedomain', None)
# Tuple (lun_address, lun_target, lun_id, lun_port)
lun = (kwargs.pop('lun_address', None), kwargs.pop('lun_target', None),
kwargs.pop('lun_id', None), kwargs.pop('lun_port', 3260))
# Tuple (username, p... | [
"def __init__(self, filename):\n super(CowDisk, self).__init__()\n self.driveID = 'master'\n self.image = CowDiskImage(child=RawDiskImage(read_only=True),\n read_only=False)\n self.image.child.image_file = filename",
"def __init__(__self__,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Detach disk from VM | def detachDisk(positive, alias, vmName):
logger.info("Detaching disk %s from vm %s", alias, vmName)
disk_attachment = get_disk_attachment(vmName, alias, attr='name')
return DISK_ATTACHMENTS_API.delete(disk_attachment, positive) | [
"def detach_disk(diskName=None):\n pass",
"def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all disks interfaces/formats/allocation policies permutations possible | def get_all_disk_permutation(
block=True, shared=False, interfaces=(VIRTIO, VIRTIO_SCSI)
):
permutations = []
for disk_format in [FORMAT_COW, FORMAT_RAW]:
for interface in interfaces:
for sparse in [True, False]:
if disk_format is FORMAT_RAW and sparse and block:
... | [
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if disk is in vm disks collection | def check_disk_visibility(disk, disks_list):
is_visible = disk in [disk_obj.get_alias() for disk_obj in disks_list]
return is_visible | [
"def isDisk(result):\n\n disk_identifier = [\"Logical Drive\", \"HDD\", \"Storage\", \"LogVol\"]\n return any(e in result for e in disk_identifier)",
"def is_disk_exist(self, disk_name):\n try:\n bucket = self.get_bucket()\n if bucket is not None:\n bucket_objs = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the disk storage domain name __author__ = "ratamir" | def get_disk_storage_domain_name(disk_name, vm_name=None, template_name=None):
if vm_name and template_name:
logger.error(
"Only one of the parameters vm_name or template_name "
"should be provided"
)
return None
logger.info("Get disk %s storage domain", disk_nam... | [
"def disk_name(self) -> str:\n return pulumi.get(self, \"disk_name\")",
"def get_disk_name():\n return \"%s.dat.disk\" % getpass.getuser()",
"def bucket_domain_name(self) -> str:\n ...",
"def GetDataDiskName(cls, instance):\n name = cls.DATA_DISK_NAME_FMT.format(instance=instance)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Export a disk to glance repository | def export_disk_to_glance(
positive, disk, target_domain, async=False, attr='id'
):
storage_domain = STORAGE_DOMAIN_API.find(target_domain)
disk = DISKS_API.find(disk, attribute=attr)
if not DISKS_API.syncAction(
disk, 'export', storage_domain=storage_domain, positive=positive,
async... | [
"def test_export_template_disk(self):\n assert ll_disks.export_disk_to_glance(\n True, self.disk.get_id(), config.GLANCE_DOMAIN\n ), \"Unable to export disk %s to glance domain %s\" % (\n self.disk.get_id(), config.GLANCE_DOMAIN\n )",
"def test_export(self):\n str... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get list of disk objects from API | def get_all_disks():
return DISKS_API.get(abs_link=False) | [
"def filesystem_list():\n client = get_client()\n client.start()\n for filesystem in client.list_filesystems():\n print(filesystem)\n client.stop()",
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a disk attachment object | def prepare_disk_attachment_object(disk_id=None, **kwargs):
disk = kwargs.pop("disk", None)
disk_obj = disk if disk else prepare_ds_object("Disk", id=disk_id)
return prepare_ds_object("DiskAttachment", disk=disk_obj, **kwargs) | [
"def _create_hacked_massive_attachment() -> test_record.Attachment:\n attachment = test_record.Attachment(b'dummy', 'text/plain')\n attachment.size = mfg_event_converter.MAX_TOTAL_ATTACHMENT_BYTES\n return attachment",
"def _create_attachment(self, filename, content, mimetype=None):\n if mimetype is Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Samples a disk and waits until disk is found in the specific storage domain or until timeout is reached | def wait_for_disk_storage_domain(
disk, storage_domain, key='id', timeout=600, interval=5
):
disk_name = get_disk_obj(disk, key).get_name() if key == 'id' else disk
for sample in TimeoutingSampler(
timeout, interval, get_disk_storage_domain_name, disk_name
):
if sample == storage_domain:... | [
"def wait_for_creation(self):\n attempts = 10\n created = self.exists()\n while not created and attempts > 0:\n try:\n created = self.exists()\n finally:\n attempts = attempts - 1\n logger.info(\"Waiting for disk creation to com... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return disk obj from disk attachment obj | def get_disk_obj_from_disk_attachment(disk_attachment):
return get_disk_obj(disk_attachment.get_id(), 'id') | [
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return disk obj list from disk attachments list | def get_disk_list_from_disk_attachments(disk_attachments):
return [
get_disk_obj_from_disk_attachment(disk_attachment) for
disk_attachment in disk_attachments
] | [
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachme... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get disk attachments objects or hrefs from a vm or template | def get_disk_attachments(name, object_type='vm', get_href=False):
api = get_api(object_type, "%ss" % object_type)
obj = api.find(name)
return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href) | [
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def get_disk_attachment(name, disk, attr='id',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a disk attachment object | def get_disk_attachment(name, disk, attr='id', object_type='vm'):
disk_list = get_disk_attachments(name, object_type=object_type)
disk_id = None
if attr == 'name' or attr == 'alias':
for disk_obj in disk_list:
disk_obj_alias = get_disk_obj(
disk_obj.get_id(), attribute='i... | [
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def prepare_disk_attachment_object(disk_id=None, **kwargs):\n disk = kwargs.pop(\"disk\", None)\n disk_obj = disk if disk else prepare_ds_object(\"Disk\", id=disk_id)\n return prepare_ds_o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all disks in the system except the OVF store disks | def get_non_ovf_disks():
return [
d.get_id() for d in get_all_disks() if (
d.get_alias() != ENUMS['ovf_disk_alias']
)
] | [
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the qcow_version info from disk name or id | def get_qcow_version_disk(disk_name, attribute='name'):
return get_disk_obj(disk_name, attribute).get_qcow_version() | [
"def get_voluuid(disk_object):\n return disk_object.get_image_id()",
"def GetVdiskInfo(self, vdiskid, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks/\"+vdiskid\n return self.client.get(uri, None, headers, query_params, content_type)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the disks contained in a snapshot | def get_snapshot_disks_by_snapshot_obj(snapshot):
return DISKS_API.getElemFromLink(snapshot) | [
"def get_disk_snapshots(pageToken=None):\n pass",
"def get_disks():\n\n if system() != \"Windows\":\n raise OSError(\"For use with Windows platforms.\")\n\n logicaldisks=run(\n [\"wmic\", \"logicaldisk\", \"get\", \"name\"],\n capture_output=True\n )\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all disksnapshots objects list in the given storage domain | def get_storage_domain_diskssnapshots_objects(storagedomain, get_href=False):
from art.rhevm_api.tests_lib.low_level.storagedomains import (
get_storage_domain_obj
)
storage_domain_object = get_storage_domain_obj(storagedomain)
return DISK_SNAPSHOT_API.getElemFromLink(
storage_domain_obj... | [
"def _get_snapshots(ds):\n command = [\"/sbin/zfs\", \"list\", \"-H\", \"-p\", \"-o\", \"name,creation,receive_resume_token\",\n \"-r\", \"-d\", \"1\", \"-t\", \"snapshot\", \"-s\", \"creation\",\n ds]\n if debug:\n print(\"get_snapshots: {}\".format(\" \".join(command)), fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if certain disk is attached to VM as Read Only | def get_read_only(vm_name, disk_id):
return get_disk_attachment(vm_name, disk_id).get_read_only() | [
"def allowed_for_access(self, device: Device) -> bool:\n if self.sys_hardware.disk.is_system_partition(device):\n return False\n\n return True",
"def test_shared_RO_disk(self):\n self.prepare_disks_for_vm(read_only=True, vm_name=self.vm_name_2)\n ll_vms.start_vms([self.vm_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wait for an event of successful/failed sparsify event starting from the last start sparsify event in the system. | def wait_for_sparsify_event(disk_id, success=True):
import art.rhevm_api.tests_lib.low_level.events as ll_events
disk_name = get_disk_obj(disk_alias=disk_id, attribute='id').get_name()
start_sparsify_query = "\"Started to sparsify %s\"" % disk_name
finished_sparsify_query = (
"%s sparsified succ... | [
"def wait_complete(jobname_synthax):\n # time.sleep(120)\n while not check_complete(jobname_synthax):\n time.sleep(120)",
"def _srsp_event_wait(self, zpi_cmd, timeout = SRSP_WAITING_TIMEOUT_DEFAULT):\r\n if zpi_cmd not in self._srsp_events:\r\n self._srsp_events[zpi_cmd] = threading... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
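The record above polls the engine's event log for a "sparsified successfully" (or failure) message that must appear after the most recent "Started to sparsify" entry. As a rough, library-free illustration of that wait-for-message-after-marker loop (this is not the art.rhevm_api events API; `get_events` and its `(event_id, message)` tuples are assumptions made for the sketch):

```python
import time

# Hypothetical event source: a callable returning (event_id, message) pairs.
# This only sketches the polling pattern used by the helper above.
def wait_for_message_after_marker(get_events, marker, message,
                                  timeout=300, interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        events = list(get_events())
        marker_ids = [eid for eid, msg in events if marker in msg]
        if marker_ids:
            last_marker = max(marker_ids)
            # Succeed only if the target message was logged after the marker.
            if any(eid > last_marker and message in msg
                   for eid, msg in events):
                return True
        time.sleep(interval)
    return False
```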
Invoke sparsify action on disk. | def sparsify_disk(disk_id, storage_domain_name, wait=True):
if not do_disk_action(
'sparsify', disk_id=disk_id, target_domain=storage_domain_name,
wait=wait
):
return False
return wait_for_sparsify_event(disk_id) if wait else True | [
"def sparsify_model(path_to_model, sparsified_model_dump_path):\n sparsity_levels = [sl / 10 for sl in range(0, 10)]\n sparsity_levels += [0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0]\n\n norms = [\"L1\", \"L2\"]\n sparse_block_shapes = [(1, 1), (1, 4)]\n\n device = torch.device('cuda')... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Refresh the routing table | def refresh(self):
pass | [
"def update_routing_table(self):\n # copied because it can be modified\n copy_of_routers = list(self.routing_table.routers)\n for router in copy_of_routers:\n new_routing_table = self.fetch_routing_table(router)\n if new_routing_table is not None:\n self.rou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempt to add the given node to the routing table. | def addNode(self, node: dht.node.Node):
bucket = self._findBucket(node)
if bucket is None:
raise Exception("Found no bucket for given id")
if node not in bucket:
# We do not have this node on our routing table yet;
# attempt to add it.
... | [
"def add_node(self, node):",
"def add(self, node):\n pass",
"def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)",
"def add_node(self, node):\n \n if node in self.node_set:\n return \n \n self.num_node = self.num_node + 1\n self.node_set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
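The body of addNode is truncated in this record, but the usual Kademlia insertion rule it hints at is: append the node when the k-bucket has room, and split only the bucket that covers our own ID. A minimal sketch under that assumption (the K value, field names, and split callback are placeholders, not the original implementation):

```python
K = 8  # assumed bucket capacity

def add_node(bucket, node_id, own_id, split):
    """Return True if node_id ends up in the bucket, False otherwise."""
    if node_id in bucket.nodes:
        return True                      # already known
    if len(bucket.nodes) < K:
        bucket.nodes.append(node_id)     # room left in the k-bucket
        return True
    if bucket.low <= own_id <= bucket.high:
        split(bucket)                    # full, but it covers our own ID
        return False                     # caller retries after the split
    return False                         # full and not splittable: drop
```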
Find the appropriate bucket for the given node | def _findBucket(self, node):
for bucket in self.buckets:
if bucket.inRange(node):
return bucket
#if bucket.low <= node and node <= bucket.high:
# return bucket
return None | [
"def _get_node(self, key):\r\n hash_code = self._hash(key)\r\n idx = self._index(hash_code)\r\n # extract the head of the corresponding bucket\r\n cur = self.buckets[idx]\r\n while cur:\r\n if cur.key == key:\r\n return cur\r\n cur = cur.next\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
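The lookup above assumes each bucket can answer inRange() for a node's ID. A self-contained sketch of that contract (the Bucket fields here are assumptions made for illustration, not the original class):

```python
class Bucket:
    """Covers the inclusive ID range [low, high]."""
    def __init__(self, low, high):
        self.low = low
        self.high = high
        self.nodes = []

    def inRange(self, node_id):
        return self.low <= node_id <= self.high

def find_bucket(buckets, node_id):
    # Linear scan, as in the record above; buckets are assumed to be disjoint.
    for bucket in buckets:
        if bucket.inRange(node_id):
            return bucket
    return None

buckets = [Bucket(0, 127), Bucket(128, 255)]
assert find_bucket(buckets, 200) is buckets[1]
assert find_bucket(buckets, 300) is None
```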
Find the K nodes in the routing table closest to the given target ID. | def findClosestNodes(self, target: hash.hash.Hash):
# TODO: make more efficient
# See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table
nodes = []
for bucket in self.buckets:
nodes = nodes + bucket.nodes
... | [
"def find_close_nodes(self, target): \r\n K=8\r\n nodes = [] \r\n if len(self.buckets) == 0: return nodes \r\n index = self.bucket_index(target) \r\n nodes = self.buckets[index].nodes \r\n min = index - 1 \r\n max = index + 1 \r\n while len(nodes) < K and ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
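The TODO in the record concedes that gathering every node and sorting is not the most efficient approach; the linked Stack Overflow thread discusses walking adjacent buckets instead. For intuition, the sort-based version reduces to ordering candidates by XOR distance, sketched here with plain integer IDs (the real Node and Hash types are not reproduced):

```python
K = 8  # assumed value of K

def find_closest(node_ids, target_id, k=K):
    # Kademlia's metric: smaller XOR distance means a closer node.
    return sorted(node_ids, key=lambda n: n ^ target_id)[:k]

nodes = [0b1010, 0b0011, 0b1111, 0b0001, 0b1000]
print(find_closest(nodes, target_id=0b1011, k=3))  # the three closest IDs
```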
Remove the given bucket from the routing table, split it into two buckets each spanning half of the original bucket's ID space, redistribute the nodes to the appropriate buckets, and add the new buckets to the routing table. | def _splitBucket(self, bucket):
idx = self.buckets.index(bucket)
self.buckets.pop(idx)
middle = int(bucket.low + (bucket.high - bucket.low)/2)
bucketLow = Bucket(bucket.low, middle, bucket.refreshed)
bucketHigh = Bucket(middle+1, bucket.high, bucket.refreshed)
... | [
"def delete_bucket_replication(Bucket=None):\n pass",
"def remove_bucket(self, uri):\n cdef bytes buri = unicode_path(uri)\n check_error(self.ctx,\n tiledb_vfs_remove_bucket(self.ctx.ptr, self.ptr, buri))\n return",
"def _SplitLargeBucket(bucket):\n old_children = buc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
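Putting the split together: drop the old bucket, halve its range, deal each node into whichever half covers it, and put both halves back where the old bucket sat. A runnable sketch with a stripped-down Bucket (refresh bookkeeping is omitted and node IDs are simplified to integers):

```python
from dataclasses import dataclass, field

@dataclass
class Bucket:
    low: int
    high: int
    nodes: list = field(default_factory=list)

def split_bucket(buckets, bucket):
    idx = buckets.index(bucket)
    buckets.pop(idx)
    middle = (bucket.low + bucket.high) // 2
    low_half, high_half = Bucket(bucket.low, middle), Bucket(middle + 1, bucket.high)
    for node_id in bucket.nodes:
        (low_half if node_id <= middle else high_half).nodes.append(node_id)
    buckets[idx:idx] = [low_half, high_half]   # keep the table ordered by range
    return low_half, high_half

table = [Bucket(0, 255, nodes=[3, 200, 77])]
split_bucket(table, table[0])
assert [(b.low, b.high) for b in table] == [(0, 127), (128, 255)]
assert table[0].nodes == [3, 77] and table[1].nodes == [200]
```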
Creates a call status class based on the monitoring backend | def create_call_status(job, internal_storage):
monitoring_backend = job.config['lithops']['monitoring']
Status = getattr(lithops.worker.status, '{}CallStatus'
.format(monitoring_backend.capitalize()))
return Status(job, internal_storage) | [
"def create(status, description):\n try:\n class_name = STATUS_TEXT[status].replace(' ', '') + \"Status\"\n return globals()[class_name](description)\n except KeyError:\n return ManagementError(status, description)",
"def call(self, status):\n # type: (Union[int, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
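The factory resolves a class by name: it capitalizes the configured monitoring backend and looks up '<Backend>CallStatus' with getattr. A toy version of that dispatch, with placeholder classes standing in for the real lithops status classes:

```python
import sys

class StorageCallStatus:          # placeholder, not the real lithops class
    def __init__(self, job):
        self.job = job

class RabbitmqCallStatus:         # placeholder, not the real lithops class
    def __init__(self, job):
        self.job = job

def create_call_status(job, monitoring_backend):
    cls = getattr(sys.modules[__name__],
                  '{}CallStatus'.format(monitoring_backend.capitalize()))
    return cls(job)

assert isinstance(create_call_status({'id': 0}, 'storage'), StorageCallStatus)
assert isinstance(create_call_status({'id': 0}, 'rabbitmq'), RabbitmqCallStatus)
```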
Sends the init event | def send_init_event(self):
self.status['type'] = '__init__'
self._send() | [
"def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()",
"def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sends the finish event | def send_finish_event(self):
self.status['type'] = '__end__'
self._send() | [
"def finished(self):\n pass",
"def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False",
"def Finish(self):\n\t\n self.queue.join()",
"def task_finished(self) -> None:\n self.debug(\"Task reports that it is finished\")\n self._close_im... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
5x5 conv filter: preserves fmap dimensions if stride=1; exactly halves fmap dimensions if stride=2; requires padding=2, dilation=1, kernel_size=5; becomes a depthwise convolution when in_planes = out_planes = groups | def conv5x5(in_planes, out_planes, stride=1, groups=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride, groups=groups,
padding=2, dilation=1, bias=False) | [
"def conv5x5(in_planes, out_planes, stride=1, groups=1, dilation=1):\n\n return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,\n padding=2, groups=groups, bias=False, dilation=dilation)",
"def conv5x5_block(in_channels,\n out_channels,\n str... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
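The claims in this record follow from the Conv2d output-size formula: with kernel_size=5, padding=2, dilation=1 the spatial size becomes floor((H + 4 - 5) / stride) + 1, i.e. H for stride=1 and ceil(H/2) for stride=2. A quick check of the record's own function, assuming PyTorch is installed:

```python
import torch
import torch.nn as nn

def conv5x5(in_planes, out_planes, stride=1, groups=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,
                     groups=groups, padding=2, dilation=1, bias=False)

x = torch.randn(1, 16, 32, 32)
assert conv5x5(16, 16, stride=1)(x).shape == (1, 16, 32, 32)   # preserved
assert conv5x5(16, 16, stride=2)(x).shape == (1, 16, 16, 16)   # halved
assert conv5x5(16, 16, groups=16)(x).shape == (1, 16, 32, 32)  # depthwise
```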
build a stack of blocks | def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3,
SE=False, expansion=3, stride=1):
norm_layer = self._norm_layer
act_layer = self._act_layer
downsample = None
# if stride > 1
# or if block input planes != block output planes (only... | [
"def build_nested_blocks(self):\n pass",
"def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n blo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
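The truncated comment above is deciding when a downsample branch is needed: whenever the stride is greater than 1 or the block's input and output widths differ. A sketch of that guard, with a 1x1 conv plus norm as the conventional shortcut projection (an assumption; the original body is cut off in this record):

```python
import torch.nn as nn

def make_downsample(inplanes, outplanes, stride, norm_layer=nn.BatchNorm2d):
    # Shortcut projection used only when the identity mapping cannot be reused.
    if stride > 1 or inplanes != outplanes:
        return nn.Sequential(
            nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
            norm_layer(outplanes),
        )
    return None
```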
mnasneta1 with 3x3 MBConv3 blocks only | def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],
dropout=0, pretrained=pretraine... | [
"def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
mnasneta1 with 3x3 MBConv3-SE blocks only | def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],
dropout=0, pretrained=pretrained, p... | [
"def block_3(self):\n \n with tf.variable_scope(\"block_3\",reuse=tf.AUTO_REUSE):\n \n tf.set_random_seed(self.seed)\n \n ## first convolution:\n conv_1 = self.conv2d(self.block_2,self.VGG16_weights[8],\n self.VGG16_weights[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
mnasneta1 with 5x5 MBConv3 blocks only | def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],
dropout=0, pretrained=pretraine... | [
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
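All three variants above feed the same parallel per-stage lists (layers, expansions, kernel_sizes, SE) into _mnasnet and differ only in kernel size and SE flags. Zipping the lists from the 5x5 MBConv3 call makes the implied stage layout explicit:

```python
layers       = [2, 3, 4, 2, 3, 1]
expansions   = [3, 3, 3, 3, 3, 3]
kernel_sizes = [5, 5, 5, 5, 5, 5]
SE           = [False, False, False, False, False, False]

for i, (n, e, k, se) in enumerate(zip(layers, expansions, kernel_sizes, SE)):
    print(f"stage {i}: {n} x MBConv{e}, {k}x{k} kernel, SE={se}")
```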