query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, lengths 19-20) | metadata (dict) |
|---|---|---|---|
Testing unpack frame function against CONNECTED | def testFrameUnpack3(self):
msg = """CONNECTED
session:ID:snorky.local-49191-1185461799654-3:18
"""
result = stomper.unpack_frame(msg)
self.assertEqual(result['cmd'], 'CONNECTED')
self.assertEqual(result['headers']['session'], 'ID:snorky.local-49191-1185461799654-3:18')
self.ass... | [
"def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)",
"def test_missingNullAfterGoodFrame(self):\n self.de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
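For context on the frame format this test exercises: a STOMP frame is a command line, `key:value` header lines, a blank line, then a NUL-terminated body. A minimal sketch of such a parser (an illustration only, not stomper's actual implementation) that satisfies the row's assertions:

```python
def unpack_frame_sketch(msg):
    """Naive STOMP-frame split: command line, 'key:value' headers, then body."""
    head, _, body = msg.partition('\n\n')
    lines = head.splitlines()
    # split on the first ':' only, since header values may themselves contain ':'
    headers = dict(line.split(':', 1) for line in lines[1:] if ':' in line)
    return {'cmd': lines[0], 'headers': headers, 'body': body.rstrip('\x00\n')}

msg = 'CONNECTED\nsession:ID:snorky.local-49191-1185461799654-3:18\n\n\x00\n'
frame = unpack_frame_sketch(msg)
assert frame['cmd'] == 'CONNECTED'
assert frame['headers']['session'] == 'ID:snorky.local-49191-1185461799654-3:18'
```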
Returns a diff for field if it's changed and None otherwise. | def get_field_diff(self, field_name):
return self.diff.get(field_name, None) | [
"def fields_diff(self):\n added = self.new_tree.added_fields(self.prev_tree)\n removed = self.prev_tree.added_fields(self.new_tree)\n return added, removed",
"def _field_was_changed(self):\n field_map = self._field_map\n for field in field_map.itervalues():\n if field... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if default_backend() supports the cipher and mode combination | def test_compatibility(cipher, mode):
    cipher_obj = cipher_params(cipher, os.urandom(length_by_cipher[cipher]))[0]  # must be a concrete object, not an interface, for validate_for_algorithm to work
    if cipher_obj.name == "ChaCha20":
return True
mode_object = None
if mode == 'CBC':
mode_object = modes.CBC... | [
"def check_backend():\n raise NotImplementedError",
"def guess_mode(alg) -> str:\n plaintext = b'e'*48\n if is_ecb(alg(plaintext)):\n return 'ECB'\n else:\n return 'CBC'",
"def get_encryption_mode():\r\n\r\n msg = 'Do you want to encrypt ({0}) or decrypt ({1})? '.format(\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
clip(arr,thresh=3.5) Simple sigma-clipping algorithm. Returns avg,std of the clipped array. | def clip(arr,thresh=3.5):
a = numpy.array(arr)
avg,std = a.mean(),a.std()
while 1:
avg,std,size = a.mean(),a.std(),a.size
a = a[abs(a-avg)<thresh*std]
if size==a.size:
break
return avg,std | [
"def clip(arr,thresh=3.5):\n\ta = arr.copy()\n\n\tavg,std = a.mean(),a.std()\n\twhile 1:\n\t\tsize = a.size\n\t\ta = a[abs(a-avg)<thresh*std]\n\t\tavg,std = a.mean(),a.std()\n\t\tif size==a.size:\n\t\t\tbreak\n\treturn avg,std",
"def sigma_clip(arr,sigma=3):\n\n cliparr = range(len(arr)) # initialize\n arr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
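The clip document above is complete; a self-contained sketch (assuming only NumPy) shows how the iteration converges and how strongly a gross outlier is suppressed:

```python
import numpy as np

def clip(arr, thresh=3.5):
    """Iteratively drop points more than thresh*std from the mean until stable."""
    a = np.array(arr)
    while True:
        avg, std, size = a.mean(), a.std(), a.size
        a = a[abs(a - avg) < thresh * std]
        if size == a.size:   # nothing removed this pass: converged
            break
    return avg, std

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(0.0, 1.0, 10_000), [500.0]])
print(data.mean(), data.std())  # both inflated by the single outlier
print(clip(data))               # close to (0, 1) after clipping
```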
check_star(peaks,data) Determines whether or not a slit looks like it is a starbox. This is done by simply checking the 3 pixels bordering each peak and ensuring that none are less than half of the peak (i.e. that the FWHM > 7 pixels). Returns True if more than half of the peaks look like boxes, otherwise returns False. | def check_star(peaks,data):
star = 0
for i in peaks:
max = data[i]
if i<3 or i+4>data.size:
continue
mean = data[i-3:i+4].mean()
if (max-mean)<0.1*max:
star += 1
if star*2>peaks.size:
return True
else:
return False | [
"def checkPeaks(peaks, nwindows):\n nvalleys = len(peaks)-2\n if nvalleys != (nwindows - 1):\n print(\"Error: number of valleys should be {}! Found {}.\".format(nwindows-1, nvalleys))\n return True # Continue looping\n else:\n print(\"OK: Found {} valleys.\".format(nvalleys))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
findlines(z) Quickly find the peaks of arclines. Returns a list containing the peak locations. | def findlines(z,bgsub=True,SATURATED=57000.):
z = z.copy()
s = z.copy()
""" First identify peaks. """
max = ndimage.maximum_filter(z,9)
p = scipy.where((max==z)&(z<SATURATED)&(max>0))[0]
s = z[p]
""" Reject low peaks. """
bg = ndimage.percentile_filter(s,10,21)
peaks = scipy.where(... | [
"def find_lines(name, num):\n fn = '%s/disp/%s.1d.fits' % (name, num)\n hdulist = pyfits.open(fn)\n data = hdulist[0].data\n header = hdulist[0].header\n locations = []\n for line in LINES:\n line_loc = get_wavelength_location(header, line)\n locations.append(find_line_peak(data, lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
id_slits(arc,find_stars=True) Determine the top/bottom of each slit in the 2D y-corrected arc image. If find_stars is True, returns slit,starbox, otherwise returns slit. | def id_slits(arc,find_stars=True,chilimit=2.5,SATURATED=57000.,useLines=True):
arc = arc.copy()
""" Attempt to avoid saturated lines """
w = arc.shape[1]
tmp = arc.copy()
tmp[tmp>SATURATED] = 0.
tmpSorted = scipy.sort(tmp,axis=1)
    flux = tmpSorted[:,int(w*0.97):int(w*0.98)].mean(axis=1)  # int() needed: float slice indices are invalid
minflux = ... | [
"def id_slits(flat_data,findstars=True):\n\n\ty_axis = flat_data.shape[0]\n\n\tdata = flat_data.mean(axis=1)\n\td = data.copy()\n\n\t\"\"\"\n\tThe slits tend to be demarcated by when the sorted data begins to\n\t grow at an accelerating rate; the first derivative tends to be an\n\t acceptable proxy, though. The e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively merge new into base. | def merge_dict(base, new, extend=True):
if isinstance(new, dict):
for key, value in new.items():
if key not in base:
base[key] = value
elif extend and isinstance(value, dict):
base[key] = merge_dict(
base=base.get(key, {}), new=val... | [
"def merge_configs(base, new, wildcard_key='XXX'):\n for key in new:\n base_keys = list(base) if key == wildcard_key else [key]\n for base_key in base_keys:\n if base_key not in base:\n base[base_key] = copy.deepcopy(new[key])\n elif isinstance(base[base_key], d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
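The merge_dict document is truncated mid-recursion; below is a self-contained sketch of the same recursive-merge pattern. The trailing `return base` is an assumption needed for the recursive assignment to work, and any elided non-dict handling is omitted:

```python
def merge_dict(base, new, extend=True):
    """Recursively merge new into base, leaving existing leaf values alone."""
    if isinstance(new, dict):
        for key, value in new.items():
            if key not in base:
                base[key] = value
            elif extend and isinstance(value, dict):
                base[key] = merge_dict(base=base.get(key, {}), new=value)
    return base   # assumed: required so the recursive assignment works

cfg = {'db': {'host': 'localhost'}, 'debug': True}
merge_dict(cfg, {'db': {'port': 5432}, 'workers': 4})
print(cfg)
# {'db': {'host': 'localhost', 'port': 5432}, 'debug': True, 'workers': 4}
```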
Return the SHA256 sum of a given object. The object used for generating a SHA256 must be JSON compatible. | def object_sha256(obj):
return hashlib.sha256(json.dumps(obj).encode()).hexdigest() | [
"def compute_obj_hash(obj) -> str:\n str_obj = json.dumps(obj, cls=BestEffortJSONEncoder, sort_keys=True)\n return hashlib.sha256(str_obj.encode('utf-8')).hexdigest()",
"def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
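One property of this implementation worth noting (and likely why some of the negatives pass `sort_keys=True`): `json.dumps` serializes dicts in insertion order, so logically equal objects can produce different digests. A quick demonstration:

```python
import hashlib
import json

def object_sha256(obj):
    return hashlib.sha256(json.dumps(obj).encode()).hexdigest()

a = {'x': 1, 'y': 2}
b = {'y': 2, 'x': 1}          # same mapping, different insertion order
print(object_sha256(a) == object_sha256(b))   # False
# sort_keys=True makes the digest order-independent:
print(hashlib.sha256(json.dumps(a, sort_keys=True).encode()).hexdigest() ==
      hashlib.sha256(json.dumps(b, sort_keys=True).encode()).hexdigest())  # True
```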
Return the SHA1 sum of a given object. The object used for generating a SHA1 must be JSON compatible. | def object_sha1(obj):
return hashlib.sha1(json.dumps(obj).encode()).hexdigest() | [
"def _sha1_hash_json(self, value):\n hash = hashlib.new(\"sha1\")\n binary_value = value.encode(\"ascii\")\n hash.update(binary_value)\n sha1_res = hash.hexdigest()\n return sha1_res",
"def sha1(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)\n d.update(data)\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor. iReader is the IndexReader object on which the search should be performed | def __init__(self, iReader):
self.ireader = iReader
self.num_of_doc = iReader.getNumberOfDocuments() | [
"def __init__(self, iReader):\n self.__index_reader = iReader",
"def __init__(self, iIndex, rIndexer):\n self.index = iIndex\n self.indexer = rIndexer",
"def searcher(self, **kwargs):\n\n from whoosh.searching import Searcher\n return Searcher(self.reader(), fromindex=self, **... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the nucleus_security_id of this MdHistoryRequestCO. | def nucleus_security_id(self, nucleus_security_id):
self._nucleus_security_id = nucleus_security_id | [
"def security_group_id(self, security_group_id):\n self._security_group_id = security_group_id",
"def security_group_id(self, security_group_id):\n\n self._security_group_id = security_group_id",
"def security_user_id(self, security_user_id):\n\n self._security_user_id = security_user_id",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the product of this MdHistoryRequestCO. | def product(self, product):
self._product = product | [
"def product(self, product):\n self._product = product",
"def setProduct(self, product):\n # First we delete an old reference. (This is used for changing \n # properties/variants within cart)\n self.deleteReferences(\"cartitem_product\")\n self.addReference(product, \"cartitem_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform experiment on bivariate dataset generated from 3 Gaussians. | def perform_bivariate_3_gaussians_exp(N, pp, mu_1, mu_2, mu_3,
sigma_1, sigma_2, sigma_3,
truncation_bounds, censoring_bounds,
max_iteration=50, seed=100):
# Fix the random state
random.seed(seed)
... | [
"def main():\n kwargs = parse_args()\n set_verbosity(kwargs.pop('v'))\n test_gaussians(**kwargs)",
"def prob3():\n h = lambda x : x > 10\n MC_estimates = []\n for N in xrange(5000,505000,5000):\n X = np.random.gamma(9,scale=0.5,size=N)\n MC = 1./N*np.sum(h(X))\n MC_estimates... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the horizontal and vertical fairlead force in a 2D plane along the straight line. update_states() must be called before accessing this function; the function will not solve the forces for a new vessel position on its own, so without that call the fairlead forces are not updated with the new position. | def get_fairlead_force_2d(self, index):
H_ref = c_double(-999.9)
V_ref = c_double(-999.9)
Map.lib.map_get_fairlead_force_2d( pointer(H_ref), pointer(V_ref),self.f_type_d, index, self.status, pointer(self.ierr))
return H_ref.value, V_ref.value | [
"def _compute_forces(self):\n # get new coeffs\n self._get_coeffs()\n\n # instead of writing many time\n awa = self.awa / 180.0 * np.pi\n\n # lift and drag\n self.lift = 0.5 * self.rho * self.aws ** 2 * self.area * self.cl\n self.drag = 0.5 * self.rho * self.aws ** 2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the horizontal and vertical fairlead force in a 3D frame relative to the global reference axes. update_states() must be called before accessing this function; the function will not solve the forces for a new vessel position on its own, so without that call the fairlead forces are not updated with the new position. | def get_fairlead_force_3d(self, index):
fx = c_double(-999.9)
fy = c_double(-999.9)
fz = c_double(-999.9)
Map.lib.map_get_fairlead_force_3d( pointer(fx), pointer(fy), pointer(fz), self.f_type_d, index, self.status, pointer(self.ierr))
return fx.value, fy.value, fz.value | [
"def calculate_near_field_forces_and_moments(self):\n # Initialize a variable to hold the global panel position as the panel's are\n # iterate through.\n global_panel_position = 0\n\n # Initialize three lists of variables, which will hold the effective strength\n # of the line vor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US02 Birth should occur before the marriage of an individual | def birthBeforeMarriage(individual):
birthDate = individual.get_birth_data()[0]
marriageDates = gedcom_parser.get_marriages(individual)
if marriageDates and birthDate:
earliestMarriageDate = (min(convertGedcomDate(
date[0]) for date in marriageDates))
birthDate = convertGedcomDa... | [
"def birth_before_marriage(self):\n query = \"select INDI, NAME, BIRT, fam.MARR from indi INNER JOIN fam ON INDI.INDI = FAM.HUSB OR INDI.INDI = \" \\\n \"FAM.WIFE \"\n for row in self.query_info(query):\n birth = row[2]\n marriage = row[3]\n if not self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US03 Birth should occur before death of an individual | def birthBeforeDeath(individual):
birthdate = individual.get_birth_data()[0]
deathdate = individual.get_death_data()[0]
if birthdate and deathdate:
birthdate = convertGedcomDate(birthdate)
deathdate = convertGedcomDate(deathdate)
if deathdate < birthdate:
print(
... | [
"def birth_before_death(self):\n query = \"select INDI, NAME, BIRT, DEAT from indi\"\n for row in self.query_info(query):\n birth = row[2]\n death = row[3]\n if not self.date_before(birth, death):\n print(\"ERROR: US03: Birth {} occurs after death {} for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US01 Dates (birth, marriage, divorce, death) should not be after the current date | def datesBeforeCurrentDate(individual):
birthdate = individual.get_birth_data()[0]
deathdate = individual.get_death_data()[0]
marriageDates = gedcom_parser.get_marriages(individual)
fams = gedcom_parser.get_families(individual)
childElements = [(fam.get_child_elements()) for fam in fams]
divor... | [
"def birth_before_death(self):\n query = \"select INDI, NAME, BIRT, DEAT from indi\"\n for row in self.query_info(query):\n birth = row[2]\n death = row[3]\n if not self.date_before(birth, death):\n print(\"ERROR: US03: Birth {} occurs after death {} for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US05 Marriage should occur before death of either spouse | def marriageBeforeDeath(individual):
deathDate = individual.get_death_data()[0]
marriageDates = gedcom_parser.get_marriages(individual)
if marriageDates and deathDate:
latestMarriageDate = (max(convertGedcomDate(
date[0]) for date in marriageDates))
deathDate = convertGedcomDate... | [
"def marriage_before_death(self):\n query = \"select INDI, NAME, DEAT, fam.MARR from indi INNER JOIN fam \" \\\n \"ON INDI.INDI = FAM.HUSB OR INDI.INDI = FAM.WIFE\"\n\n for row in self.query_info(query):\n death = row[2]\n marry = row[3]\n if not self.da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US11 Marriage should not occur during marriage to another spouse | def noBigamy(individual):
families = gedcom_parser.get_families(individual)
marriageDateRanges = []
for family in families:
marriageDate = None
divorceDate = None
for element in family.get_child_elements():
if element.get_tag() == "MARR":
marriageDate =... | [
"def marriage_before_death(self):\n query = \"select INDI, NAME, DEAT, fam.MARR from indi INNER JOIN fam \" \\\n \"ON INDI.INDI = FAM.HUSB OR INDI.INDI = FAM.WIFE\"\n\n for row in self.query_info(query):\n death = row[2]\n marry = row[3]\n if not self.da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US14 No more than five siblings should be born at the same time | def multipleBirths(family):
children = gedcom_parser.get_family_members(family, 'FAMILY_MEMBERS_TYPE_CHILDREN')
birthdays = []
for child in children:
birthdays.append(convertGedcomDate(child.get_birth_data()[0]))
if len(birthdays) < 5:
return True
else:
birthdayCounts = dic... | [
"def test_too_many_siblings(self):\r\n a_family = _create_family()\r\n child_15 = _create_individual(\"C15\", \"Jeffrey\", \"Jamison\")\r\n a_family.children.append(child_15.id)\r\n\r\n self.assertFalse(sibling_count.less_than_15_siblings(a_family))",
"def test_not_too_many_siblings(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
US53 Return True if the given individual has any of the individual errors listed in this file | def listErrors(individual): #TODO: add other errors implemented elsewhere
results = [birthBeforeMarriage(individual), birthBeforeDeath(individual), marriageBeforeDeath(individual),
datesBeforeCurrentDate(individual), noBigamy(individual)]
results = [x for x in results if x is not None]
return ... | [
"def _in_any(reason, err_haystack):\n for err in err_haystack:\n if reason.find(six.text_type(err)) != -1:\n return True\n return False",
"def has_errors(self) -> bool:",
"def HasErrors(self):\n for name in self._GetStreamNames():\n if name.startswith('error_data.'):\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete characters from (upperRow, upperCol) up to (lowerRow, lowerCol) using the current selection mode. | def doDelete(self, upperRow, upperCol, lowerRow, lowerCol):
if app.config.strict_debug:
assert isinstance(upperRow, int)
assert isinstance(upperCol, int)
assert isinstance(lowerRow, int)
assert isinstance(lowerCol, int)
assert upperRow <= lowerRow
... | [
"def delete_characters(self, count=None):\r\n count = count or 1\r\n\r\n for _ in range(min(self.columns - self.cursor.x, count)):\r\n self[self.cursor.y].pop(self.cursor.x)\r\n self[self.cursor.y].append(self.cursor.attrs)",
"def _on_delete_pressed(self) -> None:\n if(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the marker and pen pair ordered as the earlier of the two, then the later of the two. The result accounts for the current selection mode. | def startAndEnd(self):
upperRow = 0
upperCol = 0
lowerRow = 0
lowerCol = 0
if self.selectionMode == kSelectionNone:
upperRow = self.penRow
upperCol = self.penCol
lowerRow = self.penRow
lowerCol = self.penCol
elif self.select... | [
"def get_a_point(self):\n pick_from = list(self.space.marks.difference(set([psituple[1] for psituple in self.psis])))\n if len(pick_from) > 0:\n return pick_from[0]\n else:\n return self.psis[0][1]",
"def get_second_point(self):\n return self.x2, self.y2",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new instance of the consumer class, passing in the AMQP URL used to connect to RabbitMQ. | def __init__(self, amqp_url, *handlers):
self._consumer_tags = []
RabbitMQ.__init__(self, amqp_url)
        # save our handlers for future use
self._handlers = {}
for handle in handlers:
for k, v in handle.handlers().items():
self._handlers[k] = v | [
"def __init__(self, amqp_url):\n self.should_reconnect = False\n self.was_consuming = False\n\n self._connection = None\n self._channel = None\n self._closing = False\n self._consumer_tag = None\n self._url = amqp_url\n self._consuming = False\n # In pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is used to pass an encoded body to a handler and return its value. Errors are handled and returned via the "error" field. | def _wrap_handler(self, handler, body):
try:
decoded_body = json.loads(body)
result = yield handler(decoded_body)
return result
except Exception as e:
return {"error": str(e)} | [
"def decode_body(self):\n self.body = self.body.decode(self.encoding)",
"def get_body(self):\r\n fp = self._environ['wsgi.input']\r\n return fp.read()",
"def _unicode_body__get(self):\n if not self.charset:\n guess = chardet.detect(self.body)\n self.charset = gu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the duty score of each category. We don't calculate a score for each post; we assume that what a person likes can be described at the category level. | def get_duty_cate_score(chosen_duty_list: list) -> pmag.MagicDict:
res = pmag.MagicDict()
for w, cate in chosen_duty_list:
freq = MODEL[cate]['duty'][w]['freq']
prob = MODEL[cate]['duty'][w]['prob']
score = prob # freq * prob / DUTY_NF[cate]
if cate in res:
res[cate]... | [
"def strand_category_average_analysis(self):\n diversities = self.get_category_diversities()\n total_los = self.get_num_learning_outcomes()\n total_category_hits = sum(num_cats * num_los for num_cats, num_los in zip(diversities.keys(), diversities.values()))\n average = round(total_categ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the require score of each post under all categories. | def get_require_post_score(chosen_require_list: list) -> pmag.MagicDict:
res = pmag.MagicDict()
for w, cate in chosen_require_list:
posts = MODEL[cate]['posts']
for post in [*posts]:
if w in posts[post]['require']:
freq = posts[post]['require'][w]['freq']
... | [
"def sum_of_require(categories):\n return sum(map(lambda x: x.requirement, categories))",
"def list_all_cat_scores(self):\n\n categories = Category.objects.all()\n score_before = self.score\n output = {}\n\n for cat in categories: # for each of the categories\n # group 1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Another method to get the demand score for each post; it produces the same result as `get_demand_post_score`. | def get_demand_post_score_from_require_res(
require_post_score: dict) -> pmag.MagicDict:
res = pmag.MagicDict()
for cate, posts in require_post_score.items():
for post, _ in posts.items():
demand = MODEL[cate]['posts'][post]['demand']
score = ((demand['continuous_freq'] +... | [
"def get_require_post_score(chosen_require_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_require_list:\n posts = MODEL[cate]['posts']\n for post in [*posts]:\n if w in posts[post]['require']:\n freq = posts[post]['require'][w]['freq']\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if num1 and num2 are within a sliver of floating-point precision; m gives the multiplier on the floating-point spacing. >>> np.exp(np.log(.1)) == .1 False >>> approx(np.exp(np.log(.1)), .1) True | def approx(num1, num2, m=4, precision=None):
if precision:
return abs(num1-num2) <= 10**precision
else:
return abs(num1-num2) <= m * np.spacing(1) | [
"def float_equal( a , b , eps = EPS):\n print(a,b)\n return numpy.fabs(a-b) < eps",
"def isGE(self, a : float, b : float) -> bool:\n return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)",
"def approxEquals(x1, x2, epsilon = 0.005):\n return (x1 > (x2 - epsilon)) and (x1 < (x2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
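A runnable rendition of the approx document reproducing its own doctest. `np.spacing(1)` is the gap to the next representable float above 1.0 (one ulp), so the default `m=4` allows a four-ulp tolerance:

```python
import numpy as np

def approx(num1, num2, m=4, precision=None):
    if precision:
        return abs(num1 - num2) <= 10**precision
    else:
        return abs(num1 - num2) <= m * np.spacing(1)   # m ulps at 1.0

print(np.exp(np.log(.1)) == .1)        # False: the round trip is inexact
print(approx(np.exp(np.log(.1)), .1))  # True: within 4 ulps
```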
Returns true if every element of l1 and l2 are approximately equivalent >>> l1 = [.1, .2, .3] >>> l2 = [np.exp(np.log(l)) for l in l1] >>> all_approx(l1, l2) True >>> l1[0]==l2[0] False >>> l2 = l2+[.4] >>> all_approx(l1, l2) False >>> all_approx(l2, l1) False >>> l1 = l1+[.4] >>> all_approx(l1, l2) True | def all_approx(l1, l2, m=4):
same = (len(l1) == len(l2))
i = 0
while same and i < len(l1):
same = approx(l1[i], l2[i], m)
i += 1
return same | [
"def coords_equal(wp1, wp2, approx=True):\n if approx:\n d = gps_dist(wp1, wp2)\n if d >= GPS_ERROR:\n rospy.logdebug(\"Lists are not the same because WPs are %2.6f apart\"%d)\n return d < GPS_ERROR\n else:\n return wp1.lat==wp2.lat and wp1.lon==wp2.lon and wp1.alt==wp2.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the index of the bin that the signal belongs in. bin_bounds contains the upper bound on each bin's value, sorted. | def get_bin_index(signal, bin_bounds):
    # could at least do this with a binary search
index = 0
for v in bin_bounds:
if signal > v:
return index
index += 1
return index - 1 | [
"def bin_index(self, xnorm):\n return _bin_index(xnorm, self.nbins, self.padding)",
"def getBinIndex(self, x):\n\t\tb = -1\n\t\tif x == self._max_val: # final bin is [low, high], where others are [low,high)\n\t\t\tb = len(self._bins)-1\n\t\telse:\n\t\t\tb = math.floor((x-self._min_val)/self._bin_width)\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the index of the most significant change bit, the bit that will change when t is incremented >>> mscb(0) 0 >>> mscb(1) 1 >>> mscb(7) 3 >>> mscb(8) 0 | def mscb(t):
return int(np.log2(t ^ (t + 1))) | [
"def index_of_least_significant_zero_bit(self, value):\n\n index = 1\n while (value & 1) != 0:\n value >>= 1\n index += 1\n return index",
"def get_bit_position(self) -> int:\n\t\tassert 0 <= self._num_bits_remaining <= 7, \"Unreachable state\"\n\t\treturn -self._num_bit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
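The one-liner leans on a bit trick: `t ^ (t + 1)` sets exactly the bits that flip when t is incremented (the trailing run of ones plus the carry bit), and log2 of that mask is the index of the highest flipped bit. Stepping through the doctest values:

```python
import numpy as np

def mscb(t):
    # t ^ (t+1) == 0b1...1 covering every bit that changes on increment
    return int(np.log2(t ^ (t + 1)))

for t in (0, 1, 7, 8):
    print(f"{t:>2} ({t:04b}) -> mscb {mscb(t)}")
# 0 (0000) -> 0, 1 (0001) -> 1, 7 (0111) -> 3, 8 (1000) -> 0
```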
Return the index of the largest value, randomly breaking ties. >>> argmax([0, 1]) 1 >>> argmax([1, 0]) 0 >>> a = argmax([0, 1, 1]) >>> a in (1, 2) True | def argmax(values):
values = np.array(values)
mx = np.max(values)
val = np.where(values==mx)[0]
return np.random.choice(val) | [
"def _arg_max(next_state):\n max_index_list = []\n max_value = next_state[0]\n for index, value in enumerate(next_state):\n if value > max_value:\n max_index_list.clear()\n max_value = value\n max_index_list.append(index)\n elif... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
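The np.where/np.random.choice pair exists for uniform tie-breaking, which plain np.argmax does not do (it always returns the first maximum). A small demonstration:

```python
import numpy as np

def argmax(values):
    values = np.array(values)
    mx = np.max(values)
    tied = np.where(values == mx)[0]   # indices of every maximal entry
    return np.random.choice(tied)

print(np.argmax([0, 1, 1]))                          # always 1
print({int(argmax([0, 1, 1])) for _ in range(200)})  # {1, 2}: ties broken randomly
```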
Calculate the numerator such that at t=0, a/(decay+t)=alpha | def calc_alpha_init(alpha, decay):
if not decay or decay <= 0:
return alpha
else:
return float(alpha * decay) | [
"def linear_decay(x0, alpha, T, t):\n if t <= T:\n return x0 - (1 - alpha) * x0 * t / T\n else:\n return alpha * x0",
"def exponential_decay(x, a, t):\n return 1 + a * np.exp(-x / t)",
"def get_rate(self, t):\n return self.l_0 + \\\n self.alpha * sum(np.exp([self.beta * -1.0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take a vector of values and an integer window size. Return the vector of values that are the mean over n steps. Note that (right now at least) the returned vector will be n-1 elements smaller. >>> running_mean([1, 2, 2, 4, 1, 1], 2) array([ 1.5, 2. , 3. , 2.5, 1. ]) >>> running_mean([1, 1, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2], 4)... | def running_mean(data, n):
return np.convolve(data, np.ones((n, ))/n)[(n-1):-(n-1)] | [
"def running_mean(data, n):\n mean = np.convolve(data, np.ones(n), mode=\"full\")\n out_mean = np.zeros((len(data)))\n for i in range(len(data)):\n if i + 1 < n:\n out_mean[i] = mean[i] / (i + 1)\n else:\n out_mean[i] = mean[i] / n\n return out_mean",
"def moving_av... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
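Why the slice works: a 'full' convolution of length-N data with a length-n kernel has N+n-1 points; trimming n-1 from each end keeps only the positions where the window covered n real samples, leaving N-(n-1) values, matching the docstring's note:

```python
import numpy as np

def running_mean(data, n):
    # full convolution: N+n-1 points; keep only fully-overlapped windows
    # (n >= 2 assumed: with n == 1 the [0:-0] slice would be empty)
    return np.convolve(data, np.ones(n) / n)[(n - 1):-(n - 1)]

out = running_mean([1, 2, 2, 4, 1, 1], 2)
print(out, len(out))   # [1.5 2.  3.  2.5 1. ] 5 == 6 - (2 - 1)
```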
Converts a list of items into a string with special characters removed. Underscores are left. >>> join_items(['a','b','c']) 'abc' >>> join_items(['a[0]','a[1]']) 'a0a1' >>> join_items(['true_value',8]) 'true_value8' >>> join_items(['true_values',.1,.2]) 'true_values0.10.2' >>> join_items(['elbow_joint','wrist_joint','sho... | def join_items(values, sort=False):
if isinstance(values, str):
return clean_string(values)
try:
val = []
for v in values:
val.append(clean_string(v))
if sort:
val.sort()
return "-".join(val)
except TypeError:
return str(values) | [
"def _join(self, items, separator=' '):\n \n return separator.join(map(lambda s: self._encode(s), items));",
"def list_join(the_list):\n return ' '.join(the_list)",
"def implode(delim, items):\n return delim.join(items)",
"def space_join(*items):\n valid_items = []\n for item in items:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits a string of separated items into its component parts. >>> split_items('true_values0.10.2') ['true_values', 0.1, 0.2] >>> split_items('abc') ['a', 'b', 'c'] >>> split_items('true_value8') ['true_value', 8] >>> split_items('elbow_jointshoulder_jointwrist_joint') ['elbow_joint', 'shoulder_joint', 'wrist_joint'] >>>... | def split_items(item_string):
parts = item_string.split('-')
items = []
# now clean up the types
for v in parts:
if v.isnumeric():
items.append(int(v))
elif v == 'None':
items.append(None)
else:
try:
items.append(float(v))
except:
items.append(v)
return items | [
"def splitItems(items):\n if isinstance(items, str):\n usable = items.split()\n else:\n usable = items\n\n numItems = len(usable)\n\n vals = [usable[ii] for ii in range(0, numItems, 2)]\n uncs = [usable[ii] for ii in range(1, numItems, 2)]\n\n return vals, uncs",
"def split_cmdline... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Looks for files in the given directory whose filenames match required_features (using the feature1_feature2.txt naming convention). If no files exist, return the standard filename. Otherwise return the file that includes all the required features with the fewest extra features. | def get_best_file_match(required_features, base_dir):
best_match = join_items(remove_modifiers(required_features), sort=True)
required = set(split_items(best_match))
best_match = best_match+".txt"
fe = file_exists(os.path.join(base_dir, best_match), check_zip=True)
if fe:
return os.path.split(fe)[1]
options =... | [
"def feature_label_pair(path, feature_reg, label_reg):\n feature_file = ''\n label_file = ''\n for file in os.listdir(path):\n if fnmatch.fnmatch(file, feature_reg):\n feature_file = os.path.join(path, file)\n if fnmatch.fnmatch(file, label_reg):\n label_file = os.path.j... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take off extension, and check for another if extension is gz >>> split_ext('tmp.txt') ('tmp', '.txt') >>> split_ext('tmp.txt.gz') ('tmp', '.txt.gz') | def split_ext(filepath):
(fn, ext) = os.path.splitext(filepath)
if ext=='.gz':
(fn, ext) = os.path.splitext(fn)
ext += '.gz'
return (fn, ext) | [
"def _splitzipext(self, filename):\n\n if self._iszip(filename):\n return os.path.splitext(filename)\n else:\n return filename, None",
"def splitext_zip(fname):\n base_fname, ext = splitext(fname)\n if ext == '.gz' or ext == '.zip':\n base_fname, ext2 = splitext(ba... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
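A self-contained version showing the two-pass peel for '.gz', reproducing the doctest:

```python
import os

def split_ext(filepath):
    fn, ext = os.path.splitext(filepath)
    if ext == '.gz':                  # peel the compression suffix, look again
        fn, ext = os.path.splitext(fn)
        ext += '.gz'
    return fn, ext

print(split_ext('tmp.txt'))      # ('tmp', '.txt')
print(split_ext('tmp.txt.gz'))   # ('tmp', '.txt.gz')
print(split_ext('archive.gz'))   # ('archive', '.gz')
```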
Creates a shorter version of the keys in params | def shorten_keys(params):
param_names = {}
for n in params:
parts = n.split('_')
firsts = [p[0] for p in parts]
param_names[n] = ''.join(firsts)
return param_names | [
"def join_params(**params):\n\tparam_list = get_sorted_keys(params)\n\tvalues = []\n\tfor k in param_list:\n\t\tvalues.append(k+'-'+join_items(params[k]))\n\treturn \"_\".join(values)",
"def _plain_format_params(params: Dict[str, str]) -> str:\n return \"\".join([f\"{key}: {value}\\n\" for key, value in pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a string from the keyvalue pairs with _ separating them, sorted by key >>> join_params(alpha=.5, gamma=.9) 'alpha0.5_gamma0.9' >>> join_params(features=['a','b','c'],depth=15) 'depth15_featuresabc' >>> join_params(alpha=.1, trace_rate=None, l=['a','b']) 'alpha0.1_lab_trace_rateNone' | def join_params(**params):
param_list = get_sorted_keys(params)
values = []
for k in param_list:
values.append(k+'-'+join_items(params[k]))
return "_".join(values) | [
"def make_paramstr(self, parameters, key_val_sep=\" \", param_sep=\" \"):\n tmp = list()\n for p in list(parameters):\n if isinstance(p, tuple):\n p = key_val_sep.join(p)\n tmp.append(p)\n return param_sep.join(tmp)",
"def pp_join_signature(func: Function)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Turns a dictionary of parameters into a command-line list of arguments. | def params_to_args(**params):
args = []
keys = get_sorted_keys(params)
for k in keys:
if params[k] == False:
continue
args.append('--'+k)
if params[k] == True:
continue
if isinstance(params[k], str):
args.append(params[k])
continue
try:
args.extend([str(v) for v in params[k]])
except:
... | [
"def kwargs_to_command_line(kwargs):\n cmd_line = []\n for key, value in kwargs.items():\n option = key_to_option(key)\n if value is None:\n arg = \"--{option}\".format(option=option)\n else:\n arg = \"--{option}={value}\".format(\n option=option, valu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
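The params_to_args document is truncated inside its final try/except. A simplified, self-contained sketch of the visible flag rules (False drops the flag, True emits a bare flag, strings pass through, iterables expand); `sorted(params)` stands in for the external `get_sorted_keys`, and the scalar fallback is an assumption about the elided branch:

```python
def params_to_args_sketch(**params):
    args = []
    for k in sorted(params):          # stand-in for get_sorted_keys(params)
        if params[k] is False:
            continue                  # omitted entirely
        args.append('--' + k)
        if params[k] is True:
            continue                  # bare flag, no value
        if isinstance(params[k], str):
            args.append(params[k])
        else:
            try:
                args.extend(str(v) for v in params[k])
            except TypeError:         # assumed fallback for plain scalars
                args.append(str(params[k]))
    return args

print(params_to_args_sketch(verbose=True, quiet=False, alpha=0.5, tags=['a', 'b']))
# ['--alpha', '0.5', '--tags', 'a', 'b', '--verbose']
```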
Return a list of the directories in dirpath, starting with the directory in base_dir. If base_dir is provided but dirpath does not contain it, return None. >>> get_all_dirs('/tmp/asdf/fred') ['tmp', 'asdf', 'fred'] >>> get_all_dirs('/tmp/asdf/fred/') doesn't care about final slash ['tmp', 'asdf', 'fred'] >>> get_all_dir... | def get_all_dirs(dirpath, base_dir=None):
if not base_dir:
post = os.path.normpath(dirpath)
elif base_dir in dirpath:
(pre, post) = dirpath.split(os.path.normpath(base_dir))
post = os.path.normpath(post)
else:
return
dirs = []
(head, tail) = os.path.split(post)
while tail:
dirs.append(tail)
(head, tai... | [
"def get_dirs(base_dir):\n\n dir_list = []\n\n # pylint:disable=unused-variable\n # os.walk returns a tuple, not all returned values are needed.\n for dir_name, dirs, filenames in os.walk(base_dir):\n for direc in dirs:\n dir_list.append(os.path.join(dir_name, direc))\n\n return dir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split off the tail directory and add the parameters in that string to param dictionary passed. Return the head directory >>> params = {} >>> get_dir_params('/tmp/alpha1.0_alpha_decay1', params) '/tmp' >>> len(params) 2 >>> params['alpha_decay'] 1 >>> params['alpha'] 1.0 | def get_dir_params(dirpath, params):
(head, tail) = os.path.split(dirpath)
params.update(split_params(tail))
return head | [
"def get_param_set_subdirs(dirpath):\n patt = '^' + dirpath + r'/ps'\n # extra pattern to account for 'prior' param set\n prior_patt = '^' + dirpath + r'/prior'\n return get_subdirs(dirpath, patt) + get_subdirs(dirpath, prior_patt)",
"def _distributeparams(self, params):\n\n result = {}\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Join a number to the end of a string in the standard way. If width is provided, the number will be zero-padded to that width >>> join_number('fred', 10) 'fred10' >>> join_number('fred', 10, 3) 'fred010' | def join_number(string, num, width=None):
num = str(num)
if width:
num = num.rjust(width, '0')
return string + '-' + str(num) | [
"def format_number(number, num_digits=3):\n return str(number).zfill(num_digits)",
"def pad_number(self, num):\n num_str = str(num)\n return '0' * (3-len(num_str)) + num_str",
"def formatNumber(number):\n temp = str(number)\n while len(temp) < 4:\n temp = '0' + temp\n return tem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits off a number from the end of the string and returns the tuple >>> split_number('rawdata.txt500') ('rawdata.txt', 500) >>> split_number('squareboxsquare2.5') ('squareboxsquare', 2.5) >>> split_number('fred') ('fred', None) >>> split_number('fredjones') ('fredjones', None) >>> split_number(0) ('', 0) >>> split_num... | def split_number(string):
try:
parts = string.split('-')
except AttributeError:
try:
string * string
return ('', string)
except TypeError:
return None
end = parts[-1]
if '.' in end:
try:
num = float(end)
except:
num = None
else:
try:
num = int(end)
except:
num = None
if num... | [
"def _split(string):\n import re\n return list(map(int, re.findall(r'\\d+', string)))[-1]",
"def split(number):\n if number < 10:\n return [0, number]\n num1 = str(number)[0]\n num2 = str(number)[1]\n return [num1, num2]",
"def split_num(s):\n i = 0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits a parameter string into its keyvalue pairs >>> d = split_params('alpha0.5_gamma0.9') >>> d['alpha'] 0.5 >>> d['gamma'] 0.9 >>> d = split_params('depth15_featuresabc') >>> d['depth'] 15 >>> d['features'] ['a', 'b', 'c'] >>> d = split_params('alpha0.1_lab_trace_rateNone') >>> d['alpha'] 0.1 >>> d['l'] ['a', 'b'] >... | def split_params(param_string):
#TODO: check for negatives i.e. alpha--1
parts = param_string.split('_')
params = {}
for i in range(len(parts)):
param = split_items(parts[i])
if len(param) < 2:
try:
parts[i+1] = parts[i] + "_" + parts[i+1]
except:
pass
continue
elif len(param) == 2:
param... | [
"def _splitParam(cls, param):\n if '=' not in param:\n param += '='\n key, value = param.split('=', 1)\n return key, map(cls._unescapeParamValue, value.split(','))",
"def _split_url_string(param_str):\n parameters = parse_qs(param_str, keep_blank_values=False)\n for k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes modifiers from the given string and returns the original name plus a list of the modifiers present (checked against mod_set if provided) >>> split_modifiers('joint_active_scaled_return', ['trace', 'scaled', 'return']) ('joint_active', ['scaled', 'return']) >>> split_modifiers('joint_active_scaled') ('joint', ['... | def split_modifiers(mod_string, mod_set=None):
parts = mod_string.split('_')
if mod_set is None:
return (parts[0], parts[1:])
name = [parts[0]]
mods = []
for p in parts[1:]:
if p in mod_set:
mods.append(p)
else:
name.append(p)
return ('_'.join(name), mods) | [
"def remove_modifiers(*values, sort=False, mod_set=None):\n\tfeatures = []\n\tfor f in values:\n\t\t(name, mods) = split_modifiers(f, mod_set=mod_set)\n\t\tif name not in features:\n\t\t\tfeatures.append(name)\n\tif sort:\n\t\tfeatures.sort()\n\treturn features",
"def split_by_separator(self, string, separator):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
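This document is complete; reproducing its doctests makes the mod_set behavior concrete: without mod_set everything after the first token is treated as a modifier, while with mod_set unknown tokens are folded back into the name:

```python
def split_modifiers(mod_string, mod_set=None):
    parts = mod_string.split('_')
    if mod_set is None:
        return (parts[0], parts[1:])
    name, mods = [parts[0]], []
    for p in parts[1:]:
        (mods if p in mod_set else name).append(p)
    return ('_'.join(name), mods)

print(split_modifiers('joint_active_scaled_return', ['trace', 'scaled', 'return']))
# ('joint_active', ['scaled', 'return'])
print(split_modifiers('joint_active_scaled'))
# ('joint', ['active', 'scaled'])
```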
Removes _scaled, etc, from the feature list to create a unique set of the features as in the environment directory >>> features = ['obs2_scaled_decayed','obs1_scaled','obs2_scaled','obs1_return'] >>> remove_modifiers(features, sort=False) ['obs2', 'obs1'] >>> remove_modifiers(features, sort=True) ['obs1', 'obs2'] >>> r... | def remove_modifiers(*values, sort=False, mod_set=None):
features = []
for f in values:
(name, mods) = split_modifiers(f, mod_set=mod_set)
if name not in features:
features.append(name)
if sort:
features.sort()
return features | [
"def remove_modifiers(self):\n retval = set()\n for modifier in MODIFIERS:\n if self._remove_modifier(modifier):\n retval.add(modifier)\n return retval",
"def attr_remove(self):\n def _del_if_in(obj, attr):\n if attr in obj:\n del obj... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if filepath ends in 'gz' extension | def is_zip(filepath):
return os.path.splitext(filepath)[1] == '.gz' | [
"def check_gzip_path(file_path):\n _, ftype = mimetypes.guess_type(file_path)\n return ftype == 'gzip'",
"def is_archive_ext(filepath):\n file_extension = os.path.splitext(filepath)[1].lower()\n if file_extension in get_archive_extensions():\n return True\n else:\n return False",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively creates every directory in dirpath if it does not exist. Returns True/False on success/failure >>> newdir = '/tmp/asdf/fdsa/fred' >>> os.path.exists(newdir) False >>> os.path.exists('/tmp/asdf/fdsa') False >>> make_dirs(newdir) '/tmp/asdf/fdsa/fred' >>> os.path.exists(newdir) True >>> os.path.exists('/tmp/... | def make_dirs(dirpath, debug=False):
if not os.path.exists(dirpath):
try:
os.mkdir(dirpath)
except OSError as e:
if debug:
print(e)
(head, tail) = os.path.split(dirpath)
if '/' not in head or os.path.exists(head):
return False
else:
if(make_dirs(head)):
return make_dirs(dirpath)
re... | [
"def makedirs_ifneeded(dirpath):\n if sys.version[0] == '3':\n # python 3.X\n os.makedirs(dirpath, exist_ok=True) # 'recursive' mkdir, as needed\n else:\n # python 2.X\n try:\n os.makedirs(dirpath) # 2.X has no exists_ok\n ex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
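The hand-rolled recursion above predates the standard-library equivalent; on any modern Python the doctest scenario collapses to one call. Shown only as a contrast, not as a claim about the original's constraints:

```python
import os

os.makedirs('/tmp/asdf/fdsa/fred', exist_ok=True)  # creates all missing parents
print(os.path.exists('/tmp/asdf/fdsa/fred'))       # True
```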
Standardize array naming! >>> get_array_headers('tile_index', 3) ['tile_index0', 'tile_index1', 'tile_index2'] >>> get_array_headers('a', 1) ['a0'] >>> get_array_headers('a', 10)[0] 'a00' >>> get_array_headers('a', 1000)[1] 'a0001' | def get_array_headers(array_name, length):
width = len(str(length))
return [join_items([array_name, str(i).zfill(width)]) for i in range(length)] | [
"def make_headers(worksheet):\n headers = {}\n cell_idx = 0\n while cell_idx < worksheet.ncols:\n cell_type = worksheet.cell_type(0, cell_idx)\n if cell_type == 1:\n header = slughifi(worksheet.cell_value(0, cell_idx))\n if not header.startswith(\"_\"):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set attributes of the obj according to the arguments in params. include_all will add all the arguments in params to the object; if not, only those that are in valid_params are added. If validate_params, will check that the params in valid_params are not None. | def set_attributes(obj, include_all=True, validate_params=False, valid_params=None, **params):
# make sure all required values are here
if valid_params:
for k in valid_params:
if k not in params:
if not hasattr(obj, k):
raise ParameterException("Required parameter {0} missing".format(k))
else:
... | [
"def set_params(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.attack_params:\n setattr(self, key, value)\n return True",
"def addParams(self, *params):\n for param in params:\n self.addParam(param)\n self.params = list(set(self.params))",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure the iterable params contains all elements of required_params If validate_values is True, make sure params[k] are set. If required_params is a dictionary, make sure params[k] are set to the values given >>> validate_params(['a','b','c'], ['a','b']) True >>> validate_params(['a','b','c'], ['a','b','d']) False | def validate_params(params, required_params, validate_values=False):
# every key (or element) in required_params must be present in the given params
for k in required_params:
if k not in params:
return False
elif validate_values:
try:
# see if we got a dictionary of parameters
p_val = params.get(k)... | [
"def _check_req_params(self,req_params,params):\n if not all(req_p in params for req_p in req_params):\n raise ParameterRequiredError('Missing required parameter(s) %s' % req_params)",
"def _validate_params(self, params: dict) -> list:\n invalid_params = list()\n if params.get('amo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the next noncommented line of strings from a file, separated by whitespace >>> f = open('results/testing/pos/Large/rawdata.txt', 'r') >>> read_strings(f) ['a', 'b', 'c', 'd', 'e', 'f', 'step'] | def read_strings(filepointer):
line = '#'
try:
while line and line[0]=='#':
line = filepointer.readline()
except (IOError, ValueError):
return None
if line:
return line.split()
else:
return None | [
"def get_strings(src_file):\n res = []\n try:\n res = open(src_file,'r').readlines()\n res = [x.strip() for x in res]\n except:\n res = []\n return res",
"def get_strings(filename):\n list_strings= []\n with open(filename,'rU') as f:\n list_strings= [line[:-1] for lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the next line of floats from a file, separated by whitespace >>> f = open('results/testing/pos/Large/rawdata.txt', 'r') >>> read_floats(f) [0.0, 0.2, 500.0, 0.0, 0.001, 0.0, 1.0] | def read_floats(filepointer):
data = read_strings(filepointer)
if not data:
return None
try:
data = [float(x) for x in data]
return data
except:
# try the next line
return read_floats(filepointer) | [
"def read_float(filename):\n\tf = open(filename, \"r\")\n\tarr = np.fromfile(f, dtype='>f4')\n\treturn arr",
"def read_floatnl(f):\n s = read_stringnl(f, decode=False, stripquotes=False)\n return float(s)",
"def txt2float(file: str) -> float:\n return float(get_first_line(file))",
"def read_floats(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dictionary of the column indices keyed by the header in the file | def get_header_indices(filepath):
headers = get_header_list(filepath, sort=False)
return {h: i for i, h in enumerate(headers)} | [
"def indices(header):\n return dict((n,i) for i,n in enumerate(header))",
"def file_columns(header):\n\n return [i for i, h in zip(range(0, len(header)), header) if\n is_file_column(h)]",
"def get_column_positions(table):\n \n column_names = get_columns(table)\n column_dict = {}\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Moves the file pointer to the next non-comment line and returns the comments as a list of strings. | def skip_comments(filepointer):
comments = []
data = '#'
try:
pos = filepointer.tell()
except:
print("Could not read file.")
return None
while data[0] == '#':
data = filepointer.readline()
if not data:
raise Exception("Unexpected end of file while reading comments.")
if data[0] == '#':
commen... | [
"def extract_comments(filename):\n comments = []\n try:\n with open(filename, 'r') as source_file:\n file_contents = source_file.read()\n\n # extract single and multiline comments from source code file\n file_contents = parse_single_line_comments(file_contents, comments... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if all of the keys specified in required are present and match or do not match settings if specified. | def check_param_matches(candidate, settings=None, required=None, restricted=None):
print("Deprecated?")
if not settings:
settings = {}
if not required:
required = []
if not restricted:
restricted = []
# required keys must be present in candidate and match the value in settings,
# if provided
for p in req... | [
"def validate_conf_data(required_keys, conf):\n\treturn_val = True\n\tfor key_conf_req in required_keys:\n\t\t\tif(find_conf_value(key_conf_req, conf) == False):\n\t\t\t\treturn_val = False\n\n\treturn return_val",
"def _check_keys(self, jdata):\n for k in REQUIRED_KEYS.keys():\n if k not in jda... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For the given list of parameter dictionaries, return a list of the dictionary keys that appear in every parameter dictionary | def get_shared_keys(param_list):
if not param_list:
return
keys = set(param_list[0].keys())
for i in range(1, len(param_list)):
keys = keys.intersection(param_list[i].keys())
keys = list(keys)
keys.sort()
return keys | [
"def find_all(list_of_dict, match_function):\n\treturn [entry for entry in list_of_dict if match_function(entry)]",
"def extract_key_query_params(\n query_params: Dict[str, List[str]], param_key: str\n) -> Set[str]:\n return set(\n [\n item.lower()\n for sublist in [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
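Complete and runnable; a small demonstration with hypothetical parameter dictionaries from three experiment runs:

```python
def get_shared_keys(param_list):
    if not param_list:
        return
    keys = set(param_list[0].keys())
    for i in range(1, len(param_list)):
        keys = keys.intersection(param_list[i].keys())
    keys = list(keys)
    keys.sort()
    return keys

runs = [{'alpha': 0.1, 'gamma': 0.9, 'seed': 1},
        {'alpha': 0.2, 'gamma': 0.9},
        {'alpha': 0.3, 'gamma': 0.8, 'decay': 5}]
print(get_shared_keys(runs))   # ['alpha', 'gamma']
```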
Return a dictionary of the unique sets of param values for the given keys, indexed by a name made up of those values | def group_by_keys(param_list, keys):
keys = list(keys)
names = {}
for p in param_list:
if len(keys) > 0:
key = join_params(**{k: p.get(k, None) for k in keys})
#vals = {k: p.get(k, None) for k in keys}
#name = join_params(**vals)
#names[name]=vals
else:
key = ''
if key in names:
names[key]... | [
"def _multi_uniques(params: Dict[str, np.ndarray], uniques: List[str]) -> Tuple[combvec, List[str]]:\n unique_values = {}\n keys = []\n for val in uniques:\n unique_values[val] = np.unique(params[val])\n keys.append(val)\n iterator = combvec(*unique_values.values())\n return iterator, k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each entry in arg_dict add the argument to the parser if it is not already in the namespace provided. If sources is a dictionary of strings, will use the strings as the help message for the key If source is a dictionary of dictionaries, will pass the dictionary elements as parameters to add_argument | def add_arguments(arg_dict, parser, namespace=None):
for k in arg_dict:
if namespace and hasattr(namespace, k):
continue
try:
h = arg_dict[k]
if isinstance(h, dict):
parser.add_argument('--'+k, **h)
else:
parser.add_argument('--'+k, help=h)
except:
parser.add_argument('--'+k, help='manager... | [
"def _map_arguments(self, args):\n data = args.get('data')\n comp = args.get('comp')\n library = args.get('library')\n dry_run = args.get('dry_run', False)\n\n self._set_link('srcmaps-catalog', SrcmapsCatalog_SG,\n comp=comp, data=data,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse the given launch arguments from the command line, into list of tuples for launch. | def parse_launch_arguments(launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:
parsed_launch_arguments = OrderedDict() # type: ignore
for argument in launch_arguments:
count = argument.count(':=')
if count == 0 or argument.startswith(':=') or (count == 1 and argument.endswith(':=')):
... | [
"def parse_arguments(args):",
"def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a single experience instance | def get_single_experience(self, time_step):
assert self.n_experience - 1 > time_step, "Sample time step must be less than number of experience minus one."
return self.buffer_experience[time_step] | [
"def experience():\n return ExperienceFactory()",
"def get_experience_by_id(id_):\n\n return Experience.query.get(id_)",
"def test_get_experience(self):\n\t\tself.assertEqual(self.resume.get_experience().first(), self.experience)",
"def get_experience(experience_id: Optional[str] = None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return batch list of experience instance | def get_batch_experience(self, batch_size):
batch = []
for i in range(batch_size):
index = random.choice(range(self.n_experience - 1))
batch.append(self.get_single_experience(index))
return batch | [
"def get_experience(self):\n\t\treturn self.experience_set.all()",
"def get_experiences():\n return Experience.query.all()",
"def get_experience(self):\n return self.experience_set.all()",
"def experiences(self):\n return self.client.call('GET',\n self.name ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test that audiobook can be inserted into db | def test_audiobook_can_insert(self):
data = {
"audiotype": "Audiobook",
"metadata": {
"duration": 37477,
"title": "another",
"author": "Solomon",
"narrator": "Ndiferke"
}
}
response = requests.po... | [
"def test_create_audiobook_ok(client):\n\n with app.test_request_context():\n response = client.post(\n url_for(\"create_audio\"),\n json={\n \"audioFileType\": \"audiobook\",\n \"audioFileMetaData\": {\n \"title\": \"Tale of two citie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test that audiobook can be read from DB | def test_audiobook_can_read(self):
response = requests.get(
"http://localhost:9001/api/get-audio/Audiobook")
self.assertEqual(response.status_code, 200) | [
"def test_get_all_audiobook_ok(client):\n\n with app.test_request_context():\n response = client.get(url_for(\"get_all_audio\", audioFileType=\"audiobook\"))\n\n assert response.status_code == 200\n res_data = response.get_json()\n record = list(filter(lambda x: x[\"id\"] == AUDIOBOOK_ID, res_dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test that audiobook can be deleted from DB | def test_audiobook_can_delete(self):
num = str(5)
response = requests.delete(
"http://localhost:9001/api/delete-audio/Audiobook/"+num)
self.assertEqual(response.status_code, 200) | [
"def test_delete_audiobook_ok(client):\n\n with app.test_request_context():\n response = client.get(\n url_for(\"delete_audio\", audioFileType=\"audiobook\", audioFileID=AUDIOBOOK_ID)\n )\n\n assert response.status_code == 200\n res_data = response.data.decode(\"utf-8\")\n asser... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test that audiobook can be updated in DB | def test_audiobook_can_update(self):
data = {
"audiotype": "Audiobook",
"metadata": {
"title": "audiobook1",
"duration": 45678,
"author": "Solomon",
"narrator": "Aniefiok"
}
}
num = str(3)
... | [
"def test_update_audiobook_ok(client):\n\n with app.test_request_context():\n response = client.post(\n url_for(\n \"update_audio\", audioFileType=\"audiobook\", audioFileID=AUDIOBOOK_ID\n ),\n json={\n \"title\": \"Winery Dawn\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validates that aliasing works properly when the query contains both tags_key and tags_value. | def test_aliasing() -> None:
processed = parse_and_process(
{
"aggregations": [],
"groupby": [],
"selected_columns": ["tags_value"],
"conditions": [["tags_key", "IN", ["t1", "t2"]]],
}
)
sql = format_query(processed).get_sql()
transactions_... | [
"def ambiguos_val_msg(query, key, val):\n query = str(query).strip()\n msg = 'Provided query=%s\\n' % query\n msg += 'Contains ambiguous condition %s=%s\\n' % (key, val)\n msg += 'DAS does not support AND|OR operations, please revisit your '\n msg += 'query and choose either value'\n return msg",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the project_ids associated with this Community. | def get_project_ids(self, *criterion):
from wkcdd.models.helpers import get_project_ids
return get_project_ids([self.id], *criterion) | [
"def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)",
"def extract_project_ids(crm_client):\n\n project_response = crm_clie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks if there are ids already initialized | def check_initial_ids(self):
if '_Base__nb_objects' in dir(Square):
type(self).initial_ids = Square.__dict__['_Base__nb_objects'] - 1 | [
"def checkUniquenessIds(self) :\n l = set([len(x[\"id\"]) for x in self.GO])\n if (not l == set([1])) :\n raise GOparserError(\"Not all entries have exactly one id\")\n ids = set([x[\"id\"][0] for x in self.GO])\n if (not len(ids) == len(self.GO)) :\n raise GOparser... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We use a different broker_url when running the workers than when running within the Flask app. Generate an appropriate URL with that in mind. | def broker_url(host):
return '{broker_scheme}://{username}:{password}@{host}:{port}//'.format(host=host, **CONFIG_JOB_QUEUE) | [
"def _url():\n # Saving this for whenever we need to run jobs across different machines\n # workflow_manager = 'bee_wfm/v1/jobs/'\n # #wfm_listen_port = bc.get('workflow_manager', 'listen_port')\n # wfm_listen_port = wf_db.get_wfm_port()\n # return f'http://127.0.0.1:{wfm_listen_port}/{workflow_manag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
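CONFIG_JOB_QUEUE is external to this snippet; with hypothetical stand-in values the produced AMQP URL looks like this:

```python
# Hypothetical values standing in for the real CONFIG_JOB_QUEUE:
CONFIG_JOB_QUEUE = {'broker_scheme': 'amqp', 'username': 'guest',
                    'password': 'guest', 'port': 5672}

def broker_url(host):
    return '{broker_scheme}://{username}:{password}@{host}:{port}//'.format(
        host=host, **CONFIG_JOB_QUEUE)

print(broker_url('rabbit.internal'))   # amqp://guest:guest@rabbit.internal:5672//
```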
approxdp outputs eps as a function of delta, based on RDP calculations. | def approxdp(delta):
if delta < 0 or delta > 1:
print("Error! delta is a probability and must be between 0 and 1")
if delta == 0:
return rdp(np.inf)
else:
def fun(x): # the input the RDP's \alpha
if x <= 1:
return np.inf
... | [
"def draw_p_to_eps(p):\n return ppf((p + 1.0) / 2)",
"def update_epsilon(self):\n if self.eps > self.eps_end:\n self.eps -= self.eps_decay\n\n # Reached final epsilon\n if self.eps <= self.eps_end:\n if not self.printStopExploration:\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
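The document cuts off inside the inner function, but its shape matches the standard RDP-to-(eps, delta) conversion: eps(delta) = min over alpha > 1 of rdp(alpha) + log(1/delta)/(alpha - 1). A minimal sketch of that conversion, with a Gaussian-mechanism RDP curve rdp(alpha) = alpha/(2*sigma^2) assumed purely for illustration:

```python
import numpy as np

sigma = 2.0
def rdp(alpha):
    # hypothetical RDP curve (Gaussian mechanism with noise sigma)
    return alpha / (2 * sigma**2)

def approxdp(delta, alphas=np.linspace(1.01, 200.0, 2000)):
    if delta < 0 or delta > 1:
        raise ValueError("delta is a probability and must be between 0 and 1")
    if delta == 0:
        return rdp(np.inf)
    # eps(delta) = min_{alpha > 1} rdp(alpha) + log(1/delta) / (alpha - 1)
    return min(rdp(a) + np.log(1 / delta) / (a - 1) for a in alphas)

print(approxdp(1e-5))   # finite eps for the assumed curve
```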
Convert an approxdp function to an fdp function. | def approxdp_func_to_fdp(func, delta_func=False):
#
# By default, logdelta_func is False, and func is eps as a function of delta
# fpr = maximize_{delta} approxdp_to_fdp(eps(delta),delta)(fpr)
# if delta_func is True, it means that 'func' is a delta as a function of eps, then
# fpr = maximize_{delta... | [
"def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split by block; extract 1st-level data (timestamp and raw data of each block) into a dict | def _split_by_block(self, path=None, category='meminfo'):
with open(path, "r") as f:
text = f.read()
lst = re.split('zzz', text, flags=re.DOTALL) # to list based on time
lst = [x for x in lst if x] # remove empty strings
"""
Python 2.x
... | [
"def splitRawData(self, rawdata):\n data = {}\n times = {}\n for probe, probeData in rawdata.iteritems():\n data[probe] = probeData['data']\n times[probe] = probeData['time']\n \n return data, times",
"def _prepare_for_storage(cls, raw_block):\n bloc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split by key/value pair elements within a line | def _split_by_keypair(self, osw_dict={}):
lst = osw_dict
keypair_dict = []
for d in lst:
if d['key'] == 'raw_line':
keypair_lst = re.split(r',',d['value'])
for k,v in keypair_lst:
_d = [{'timestamp':d['timestamp... | [
"def __split_line__(self, line):\n line = line.strip()\n cat, pair = line.split(':', 1)\n key, val = pair.split('=', 1)\n return cat, key, val",
"def split_resultline(line: str) -> t.Mapping[str,str]:\n\treturn split_keyvalueline(line[len('RESULT '):].strip())",
"def split_file_with_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
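The stored document above is truncated and would not run as written: `for k, v in keypair_lst` tries to unpack single strings from `re.split`. A working sketch of what it appears to intend, with the input format (comma-separated key=value fields per raw line) as an assumption:

```python
import re

def split_by_keypair(rows):
    # Keep each row's timestamp and split its raw line into key=value pairs.
    out = []
    for d in rows:
        if d['key'] != 'raw_line':
            continue
        for pair in re.split(r',', d['value']):
            k, _, v = pair.partition('=')
            out.append({'timestamp': d['timestamp'],
                        'key': k.strip(), 'value': v.strip()})
    return out

rows = [{'key': 'raw_line', 'timestamp': '12:00:01',
         'value': 'free=1024, used=2048'}]
print(split_by_keypair(rows))
# [{'timestamp': '12:00:01', 'key': 'free', 'value': '1024'},
#  {'timestamp': '12:00:01', 'key': 'used', 'value': '2048'}]
```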
Analyze oswmem free memory: check if free memory <= min memory | def oswmem_free_memory(self, min=0):
    # True if any sample's free memory is at or below the threshold
    result = (self.df['free mmemory'] <= min).any()
    return result | [
"def __isMemoryAvailable(self) :\n #mem_free = psutil.phymem_usage()[2]\n\n #print \"Memory free = \" + str(mem_free)\n success = False\n found = False\n almost_size = 0\n size = 0\n self.free = 0\n line = \"\"\n freeMagnitude = None\n\n #####\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a label on edge | def add_edge_label(self, edge, label, color):
# Sort vertices index min - max
p0, p1 = edge
p0, p1 = min(p0, p1), max(p0, p1)
self.edges_label[(p0, p1)].append((label, color)) | [
"def _edgeLabel(self, node, parent):\r\n return self.word[node.idx + parent.depth: node.idx + node.depth]",
"def add_edge(source, sink, label=\"\"):\n source.add_outgoing_edge(sink, label)\n sink.add_incoming_edge(source, label)",
"def make_graphviz_labels(self):\n self_loops = set(self.self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
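The min/max swap in `add_edge_label` is the usual trick for undirected edges in a dict-backed structure: both orientations of a vertex pair map to one canonical key. A standalone sketch:

```python
from collections import defaultdict

edges_label = defaultdict(list)

def add_edge_label(edge, label, color):
    # Canonicalize to (min, max) so (7, 3) and (3, 7) address the same edge.
    p0, p1 = edge
    edges_label[(min(p0, p1), max(p0, p1))].append((label, color))

add_edge_label((7, 3), "seam", "red")
add_edge_label((3, 7), "crease", "blue")
print(edges_label[(3, 7)])  # [('seam', 'red'), ('crease', 'blue')]
```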
Add a label on point | def add_point_label(self, point, label, color):
self.vertices_label[point].append((label, color)) | [
"def point_label(word, x, y):\n plt.text(x, y, word, horizontalalignment='center', verticalalignment='bottom')",
"def _draw_label(label, label_x, label_y):\n pass",
"def draw_shape_label(self, label, xform, colour):\n #TODO deal with alignment, rotation\n pos = xform.chain(Point(label.x,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform a 3D point from mesh (local) space to world space by multiplying with the object's world matrix | def _world_point(self, point_3d):
return self.obj.matrix_world @ point_3d | [
"def _traversible_world_to_vertex_world(self, pos_3):\n # Divide by 100 to convert origin back to meters\n # The mesh is stored in units of meters\n # So we just offset the human by the desired # meters\n xy_offset_map = self.map.origin/100. + pos_3[:2]\n\n pos_3 = np.array([xy_offset_map[0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
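The `@` in the record is matrix multiplication (Blender 2.8+ API). The same local-to-world transform in plain NumPy, using a hypothetical 4x4 homogeneous world matrix:

```python
import numpy as np

# Rotation/scale in the upper-left 3x3, translation in the last column.
M = np.array([
    [1, 0, 0, 2],
    [0, 1, 0, 0],
    [0, 0, 1, 5],
    [0, 0, 0, 1],
], dtype=float)

local = np.array([1.0, 1.0, 0.0, 1.0])  # homogeneous point
world = M @ local
print(world[:3])  # [3. 1. 5.]
```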
Return the normal vector (pointing outside) to an object and a pair of vertices | def _normal_vector(o, p0_3d, p1_3d):
# The vector between middle point of v1-v2 and object center location
# is the normal vector I'm looking for
vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation
	# normalize so I can do length computation on it
vn.normalize()
return vn | [
"def vector(self):\n v = self.vertex.position - self.twin.vertex.position\n return v",
"def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))",
"def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)",
"def GetVertexNormal(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
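Outside Blender, the same construction is a few lines of NumPy: take the edge midpoint (`lerp(p0, p1, 0.5)`), subtract the object's center, and normalize. Note this points outward only for roughly convex objects, which the record implicitly assumes:

```python
import numpy as np

def normal_vector(center, p0, p1):
    mid = 0.5 * (np.asarray(p0, float) + np.asarray(p1, float))  # lerp at t=0.5
    vn = mid - np.asarray(center, float)
    return vn / np.linalg.norm(vn)

print(normal_vector((0, 0, 0), (1, 0, 0), (1, 1, 0)))
# [0.89442719 0.4472136  0.        ]
```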
Load 10 products from dump.json. | def loadProducts():
dump = os.path.dirname(os.path.abspath(__file__)) + "/dump.json"
    # Use a context manager so the file handle is closed after loading.
    with open(dump, 'r') as data:
        for deserialized_object in serializers.deserialize("json", data):
            deserialized_object.save() | [
"def load_products():\n\n\tprint \"Products\"\n\tproduct_id = 0\n\twith open('items.json') as file:\n\t\tfor title in file:\n\t\t\ttitle = title.rstrip().strip(',')\n\t\t\tproduct = Product(product_id=product_id, title=title)\n\t\t\tproduct_id += 1\n\t\t\tdb.session.add(product)\n\t\tdb.session.commit()",
"def pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that core.learncurve.learning_curve raises NotADirectoryError | def test_learncurve_raises_not_a_directory(dir_option_to_change,
specific_config,
tmp_path, device):
options_to_change = [
{"section": "LEARNCURVE", "option": "device", "value": device},
dir_option_to_change
]
... | [
"def testNotADirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"not_a_directory\")",
"def test_run_training_set_invalid_root_dir(self):\n with self.assertRaises(tf.errors.FailedPreconditionError):\n policy_util.train(\n ag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Downloads and extracts a zip file from S3. | def download_zip_file(s3_client, bucket, key):
    with tempfile.NamedTemporaryFile() as temp_file:
s3_client.download_file(bucket, key, temp_file.name)
with zipfile.ZipFile(temp_file.name, "r") as zip_file:
yield zip_file | [
"def get_zipfile(bucket_name, file_name, dl_name):\n s3 = boto3.client('s3')\n s3.download_file(bucket_name, file_name, dl_name)",
"def get_zip_file(url):\n with requests.get(url, stream=True) as f:\n z = zipfile.ZipFile(io.BytesIO(f.content))\n return z",
"def _download_and_extract_submissio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
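Because the record `yield`s from inside a `with`, the caller must keep the generator alive while reading the archive; wrapping the generator function in `contextlib.contextmanager` is one way to consume it. A self-contained sketch — an in-memory zip stands in for the S3 download, so `s3_client`, bucket, and key are placeholders:

```python
import contextlib
import io
import zipfile

def download_zip_file(s3_client, bucket, key):
    # Stand-in: build a zip in memory instead of downloading from S3.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("hello.txt", "hi")
    buf.seek(0)
    with zipfile.ZipFile(buf, "r") as zip_file:
        yield zip_file

with contextlib.contextmanager(download_zip_file)(None, "bucket", "key") as zf:
    print(zf.namelist())  # ['hello.txt']
```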
Fix END (missing END, End -> END, END position should be the same as FOR, etc.). | def fix_end(self, node):
if node.header.tokens[0].type == Token.SEPARATOR:
indent = node.header.tokens[0]
else:
indent = Token(Token.SEPARATOR, self.formatting_config.separator)
node.end = End([indent, Token(Token.END, "END"), Token(Token.EOL)]) | [
"def end():\n return EndBlock()",
"def SectionEnd():\n pass",
"def end() -> Parser:\n return End()",
"def end(self, end: pos.Pos) -> None:\n self.__end = end",
"def remove_bottom_end_tags(resume_output):\n # print(\"template before removing end tag:\", resume_output)\n\n # sets line co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split a node's statements into those that belong to it and those that belong to outside nodes. | def collect_inside_statements(self, node):
new_body = [[], []]
is_outside = False
starting_col = self.get_column(node)
for child in node.body:
if not isinstance(child, EmptyLine) and self.get_column(child) <= starting_col:
is_outside = True
new_bod... | [
"def handle_statements(stmt):\n\n tokens = []\n depth = 0\n for token in stmt:\n if token[0] == 'DO': depth += 1\n elif token[0] == 'END': depth -= 1\n\n if depth == 0 and token[0] == 'NEWLINE':\n if len(tokens):\n tree = Parser... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render action. This action returns a wiki page with an optional message, or redirects to a new page. | def render(self):
_ = self.request.getText
form = self.request.form
if form.has_key('cancel'):
# User canceled
return self.page.send_page(self.request)
try:
if not self.allowed():
raise ActionError(_('You are not allowed to ed... | [
"def render(self):\n _ = self._\n form = self.form\n \n if form.has_key(self.form_cancel):\n self.render_cancel()\n return\n\n # Validate allowance, user rights and other conditions.\n error = None\n if self.is_excluded():\n error = _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a discrete control set that is shaped like a cosine function. | def gen_controls_cos(complex_controls, control_count, control_eval_count,
evolution_time, max_control_norms, periods=10.):
period = np.divide(control_eval_count, periods)
b = np.divide(2 * np.pi, period)
controls = np.zeros((control_eval_count, control_count))
# Create a wave f... | [
"def cosseries():\r\n def _cos():\r\n for term in integ(integ(-COS), Fraction(1, 1)):\r\n yield term\r\n COS = PowerSeries(_cos)\r\n return COS",
"def discrete_cosine_tranform(self, matrix):\n print(\"--------DCT----------\")\n print(matrix)\n N = len(matrix)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
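The `gen_controls_cos` record is cut off mid-comment; a hedged completion of the idea — one cosine per control, scaled to stay under its max norm (the amplitude choice of half the norm is an assumption):

```python
import numpy as np

def gen_controls_cos(control_count, control_eval_count, max_control_norms,
                     periods=10.0):
    period = control_eval_count / periods
    b = 2 * np.pi / period                      # angular frequency
    t = np.arange(control_eval_count)
    controls = np.zeros((control_eval_count, control_count))
    for i in range(control_count):
        controls[:, i] = (max_control_norms[i] / 2.0) * np.cos(b * t)
    return controls

c = gen_controls_cos(2, 100, np.array([1.0, 0.5]))
print(c.shape, np.abs(c).max())  # (100, 2) 0.5
```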
Create a discrete control set that is shaped like a flat line with small amplitude. | def gen_controls_flat(complex_controls, control_count, control_eval_count,
evolution_time, max_control_norms, periods=10.):
controls = np.zeros((control_eval_count, control_count))
# Make each control a flat line for all time.
for i in range(control_count):
max_norm = max_cont... | [
"def test_basis_categorical():\n cat_data = ['sand'] * 20 + [np.nan] * 5 + ['cement'] * 10 + [np.nan] * 5\n curve_cat = Curve(cat_data, index=range(0, 40))\n curve_new = curve_cat.to_basis(start=5, stop=30, step=1)\n assert len(curve_new) == 26",
"def make_curve_data(control_points):\n spline =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sanitize `initial_controls` with `max_control_norms`. Generate both if either was not specified. | def initialize_controls(complex_controls,
control_count,
control_eval_count, evolution_time,
initial_controls, max_control_norms):
if max_control_norms is None:
max_control_norms = np.ones(control_count)
if initial_controls... | [
"def autostrip(cls):\n fields = [(key, value) for key, value in cls.base_fields.iteritems()\n if isinstance(value, forms.CharField)]\n for field_name, field_object in fields:\n def get_clean_func(original_clean):\n return lambda value: original_clean(value and value.strip())\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the real state of the system when the program (re)starts. The transition init -> any is done and the callback leave_init() is called manually. No email/SMS alert will be sent | def init_state(self):
self.read_inputs()
if (self.in_power.value == 1) and (self.in_alert.value == 1):
self.state = 'alert'
elif (self.in_power.value == 1):
self.state = 'on'
else:
self.state = 'off'
self.leave_init() | [
"def init(self):\n self.connect_to_switches()\n self.reset_states()",
"def start_state_machiene( self ):\n\t\tself.__set_catcher( False )\n\t\tself.sawTarget = False\n\t\twhile( not rospy.is_shutdown() ): \n\n\t\t\t# Store Previous State \n\t\t\tif self.previousState is not self.state:\n\t\t\t\tself... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper method to call mail & sms alerts | def make_alert(*args):
try: SmsAlarmAlert(*args)
except: logger.exception('Fail calling SmsAlarmAlert()')
try: EmailAlarmAlert(*args)
except: logger.exception('Fail calling EmailAlarmAlert()') | [
"def sms(self, study, alerts, **kwargs):\n print(\"'sms' not implemented\")",
"def test_send_sms_report(self):\n pass",
"def do_alert_send(self, request, params):\n receiver = params[\"receiver\"]\n title = params.get(\"title\")\n title_en = params.get(\"title_en\")\n m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper method to call DBLog.new() on alarm event | def make_DBLog(subject, event, badge, detail=''):
app = create_app()
with app.app_context():
DBLog.new(subject=subject, scope="nox", badge=badge, message=event, ip='-', user='-', detail=detail) | [
"def new_archive_record(self, event):\n \n # Reset the alarm counter\n self.alarm_count = 0",
"def set_new_alarm():\r\n time = request.args.get('alarm')\r\n name = request.args.get('two')\r\n news = request.args.get('news')\r\n weather = request.args.get('weather')\r\n date = t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes phase from given timestamps. Phase is normalized time from 0 to 1. | def normalize_time(full_timestamps, half_timestamp):
phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
return phases | [
"def normalize_time(timestamps):\n phases = (timestamps - timestamps[0]) / (timestamps[-1] - timestamps[0])\n return phases",
"def phase(t,t0,P):\n return np.array((t - t0)/P - np.floor((t - t0)/P))",
"def calc_phase(self, time):\n dur = self.get_duration()\n phase = time / dur\n\n if self.ena... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
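A worked example of the normalization: subtract the first timestamp, divide by the total span, and every phase lands in [0, 1]:

```python
import numpy as np

full_timestamps = np.array([10.0, 12.0, 16.0, 20.0])
half_timestamp = np.array([10.0, 15.0, 20.0])

phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
print(phases)  # [0.  0.5 1. ]
```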
Compute a TxN matrix of features of the given phase vector using Gaussian basis functions. Where T is the number of elements in the phase vector and N is the number of basis functions. | def compute_feature_matrix(phases, N, h):
T = len(phases)
# Uniformly distribute the centers of N basis functions in domain[-2h,2h+1].
centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
# compute a TxN matrix with centers
C = np.repeat(centers.reshape(1, N), T, axis=0)
# compute a TxN matrix with p... | [
"def generate_thetas(N, O, T, mu1=-2., sigma1=50, mu2=0., sigma2=50, alpha=12., ratio_modulated=1.):\n D = transforms.compute_D(N, O)\n MU = numpy.tile(mu1, (T, D))\n MU[:,N:] = mu2\n # Create covariance matrix\n X = numpy.tile(numpy.arange(T),(T,1))\n K1 = 1./alpha*numpy.exp(-(X - X.transpose()) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
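The record's computation is cut off after building the center matrix; a runnable sketch of the full T x N construction. The squared-exponential form and row normalization are assumptions, but both are standard for ProMP-style basis features:

```python
import numpy as np

def compute_feature_matrix(phases, N, h):
    T = len(phases)
    # Uniformly distribute N basis-function centers over [-2h, 1 + 2h].
    centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
    C = np.repeat(centers.reshape(1, N), T, axis=0)              # T x N centers
    P = np.repeat(np.asarray(phases).reshape(T, 1), N, axis=1)   # T x N phases
    Phi = np.exp(-0.5 * ((P - C) / h) ** 2)                      # Gaussian basis
    return Phi / Phi.sum(axis=1, keepdims=True)                  # rows sum to 1

Phi = compute_feature_matrix(np.linspace(0, 1, 5), N=8, h=0.1)
print(Phi.shape)  # (5, 8)
```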
This function prints out header keywords as part of the BPIXTAB verification procedure. | def bpix_kw(bpixtab):
print('Verifying the header keywords of UVIS bad pixel table {}...'.format(bpixtab))
print('USEAFTER:')
print(fits.getheader(bpixtab)['USEAFTER'])
print(' ')
print('PEDIGREE:')
print(fits.getheader(bpixtab)['PEDIGREE'])
print(' ')
print('DESCRIP:')
print(fits.ge... | [
"def PrintHeader(self):",
"def print_header(self):\n for param in sorted(self.header.keys()):\n if param in (\"HEADER_START\", \"HEADER_END\"):\n continue\n print(\"%s: %s\" % (param, self.header[param]))",
"def print_header_information():\n\t\tprint \"Elijah Molloy\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The main function for the UVIS bad pixel table verification procedure. | def bpixtab_test(bpixtab, path='/grp/hst/wfc3j/jmedina/bpixtab_test/'):
# Verifying the header keywords
bpix_kw(bpixtab)
# Generating an image of the bad pixels using the bad pixel table
# which can be inspected using DS9
bpix_image(bpixtab, path) # uses default path | [
"def runTest() -> None:\r\n\r\n rgb_to_hsv_testing()",
"def main():\n args = load_args()\n x, y, z = test_domain(args.nx, args.ny, args.nz, args.n_cores)\n\n if x.size + y.size > 0:\n # We only care if there are issues on the surface layer\n message_parts = []\n if x.size > 0:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |