query | document | negatives | metadata
|---|---|---|---|
This function runs video annotation. | def main_video():
annotate_movie("project_video.mp4", "annotated_project_video.mp4")
# annotate_movie("challenge_video.mp4", "annotated_challenge_video.mp4") | [
"def annotate_video(self):\n self.frame_count = 0\n video = VideoFileClip(self.video_file)\n annotated_video = video.fl_image(self.detect)\n annotated_video.write_videofile(self.output_file, audio=False)",
"def run_on_video(self, video):\n video_visualizer = VideoVisualizer(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Info gives human-readable information about the token class as a string. | def info(cls):
return "class __name__: {}, basename: {}, name: {}, pattern: {}".format(cls.__name__, cls.basename, cls.name, repr(cls.pattern_str)) | [
"def bearer_info_func(token):\n token_payload = decode_token_payload(token)\n token_info = {\n \"user_id\": token_payload.get(\"sub\"),\n \"client_id\": token_payload.get(\"cli\"),\n \"token_type\": token_payload.get(\"typ\"),\n }\n return token_info",
"def info():\n print('sfp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return table change rule with the given token name. | def get_table_change_rule(self, token_name):
return self.__table_change_rules[token_name] | [
"def add_table_change_rule(self, token_name, token_table):\n self.__table_change_rules[token_name] = token_table",
"def del_table_change_rule(self, token_name):\n del self.__table_change_rules[token_name]",
"def get_token(self, token_name):\n return self.__table[token_name]",
"def get_rul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add table change rule from a token name to a token table. | def add_table_change_rule(self, token_name, token_table):
self.__table_change_rules[token_name] = token_table | [
"def get_table_change_rule(self, token_name):\n return self.__table_change_rules[token_name]",
"def del_table_change_rule(self, token_name):\n del self.__table_change_rules[token_name]",
"def add_token(self, token):\n assert(issubclass(token, Token))\n self.__table[token.name] = toke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete table change rule with the given token name. | def del_table_change_rule(self, token_name):
del self.__table_change_rules[token_name] | [
"def PBH_RULE_delete(db, table_name, rule_name):\n\n ctx = click.get_current_context()\n\n table_name_validator(ctx, db.cfgdb_pipe, table_name)\n rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)\n\n table = str(PBH_RULE_CDB)\n key = (str(table_name), str(rule_name))\n\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add given token to the token table. | def add_token(self, token):
assert(issubclass(token, Token))
self.__table[token.name] = token
    # Invalidate compiled regex
self.__token_re = None | [
"def add_token(self, token, num=1, num_all=1):\n self.add_token_id(self.index.token_to_id(token), num=num, num_all=num_all)",
"def addToken(self, token):\n assert token.__class__.__name__ == 'Token'\n if token in self.token:\n self.logger.warning('Token %s already exists on the pla... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the given tokens from an iterable object to the token table. | def add_tokens(self, token_iter):
    added = 0
    for t in token_iter:
        self.__table[t.name] = t
        added += 1
    if added:
        # Invalidate compiled regex
        self.__token_re = None | [
"def append_tokens(self, tokens):\n if type(tokens) != list:\n raise FatalRuntimeError(\"Tokens type error\")\n self.tokens += tokens\n self.length += len(tokens)",
"def _add_tokens(self, tokens: List, terms: List, search_string: str,\n warnings: List) -> None:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and add a new token class to the token table. New token classes are child classes of the given token_subclass, or of default_token_class if none is given. In any case, a new class is always created. | def add_new_token(self, *vargs, token_subclass=None, **kwargs):
    if token_subclass is None:
token_subclass = self.default_token_class
class new_token_class(token_subclass):
pass
assert(issubclass(new_token_class, Token))
new_token_class.init(*vargs, **kwargs)
... | [
"def prepend_class_token(\n tokens: torch.Tensor, class_token: torch.Tensor\n) -> torch.Tensor:\n batch_size = tokens.shape[0]\n batch_class_token = class_token.expand(batch_size, -1, -1)\n return torch.cat([batch_class_token, tokens], dim=1)",
"def add_class(self, class_):\n self.classes.appen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a token from the token table. | def remove_token(self, token):
if isinstance(token, str):
name = token
else:
        name = token.name
    self.__table.pop(name)
    # Invalidate compiled regex
self.__token_re = None | [
"def delete_token(self, token):\n raise NotImplementedError",
"def remove_token_from_board(self):\n connector = physics_board_connector.PhysicsBoardConnector(self)\n connector.remove_token_from_board()\n self.physics_tokens.remove()",
"def deleteToken(self, token):\n taskMgr.r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove all tokens from the token table. | def remove_tokens(self):
self.__table.clear()
    # Invalidate compiled regex
self.__token_re = None | [
"def clear(self):\n self._tokens.clear()",
"def remove(self):\n for word in self.words:\n word._mwt = None # pylint: disable=W0212\n self.root.multiword_tokens = [tok for tok in self.root.multiword_tokens if tok != self]",
"def remove_token_from_board(self):\n connector =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get token with given name. | def get_token(self, token_name):
return self.__table[token_name] | [
"def get_name_for_token(self, token_name):\n name_value = None\n if token_name in self._name_tokens:\n name_value = self._name_tokens[token_name]\n return name_value",
"def get_token(self, id):\n try:\n return self.id2token[id]\n except KeyError:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Should be called after all tokens are added to the table. Calls regenerate_match_re() if the token table is not empty and token_re has not been previously compiled. Raises TokenizerRegexpError if regexp compilation fails. | def finalize(self):
    if self.__token_re is None and self.__table:
self.regenerate_match_re() | [
"def regenerate_match_re(self):\n def find_broken_token_regex():\n \"\"\"Tries to find which token regex is broken.\n\n Returns:\n (str, str). Tuple of token name and token regex.\n \"\"\"\n trs = r\"\"\n for token in self.__table.values()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the regex which is used to match tokens in the token table. The regex is generated from the tokens stored in the token table. It needs to be regenerated manually every time a token is added if the token table is used by the tokenizer in between. | def regenerate_match_re(self):
def find_broken_token_regex():
"""Tries to find which token regex is broken.
Returns:
(str, str). Tuple of token name and token regex.
"""
trs = r""
for token in self.__table.values():
if ... | [
"def find_broken_token_regex():\n trs = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n trs += r\"(?P<{}>{})\".format(token.name, token.pattern_str)\n try:\n re.co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tries to find which token regex is broken. | def find_broken_token_regex():
trs = r""
for token in self.__table.values():
if token.pattern_str: # Skip tokens with empty pattern
trs += r"(?P<{}>{})".format(token.name, token.pattern_str)
try:
re.compile(trs, re.M... | [
"def regenerate_match_re(self):\n def find_broken_token_regex():\n \"\"\"Tries to find which token regex is broken.\n\n Returns:\n (str, str). Tuple of token name and token regex.\n \"\"\"\n trs = r\"\"\n for token in self.__table.values()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns generator for parsed tokens which are instances of Token classes from the symbol table. Some tokens can cause symbol table switch using 'table_change_rules'. Note the magic token named "EOP" if yield_eop is True! If yield_eop is True and no "EOP" named token exists in main token table, then the "EOP" token is a... | def get_tokens_gen(self, text, yield_eop=True):
def generate_error_msg(pos, text):
line_start_pos = 0
line_num = 1
if pos != 0:
line_start_pos = text.rfind(os.linesep, 0, pos)+1
line_num = text.count(os.linesep, 0, pos)+1
line_end_p... | [
"def tokens(self):\n for t in self._ast.tokens:\n yield t",
"def _terminalSymbolsGenerator(self):\n if self.end_symbol_set == \"unicode\":\n symbol_set = UNICODE_SYMBOLS\n else:\n symbol_set = ASCII_SYMBOLS\n\n for c in symbol_set:\n yield(c)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets called by the notifier thread when fname changes. | def notifyChanged(self, fname):
if self.ignoreNotifications:
return
# fileState = self.findFileState(fname)
fileState = next((fileState for fileState in self.files if fileState.fname == fname), None)
if fileState is None:
self.addCreatedFile(fname)
else:
... | [
"def __notifyChange(self):\n self.__flgNotify = True\n auto = self.editor.settings.value(\"autoReloadChangedFiles\")\n if (auto or QtGui.QMessageBox.question(\n self.editor, \"File changed\",\n \"The file <i>%s</i> has has changed externally.\\n\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets called by the notifier thread when fname is created. | def addCreatedFile(self, fname):
fileState = self.addFileState(fname)
logging.info("File created: " + self.niceFilename(fileState))
self.polluted.append(fileState) | [
"def on_created(self, event):\n \"\"\"This will add a file which is added to the watchfolder to the creates and the info file.\"\"\"\n # if (event.src_path.endswith(\".fastq\") or event.src_path.endswith(\".fastq.gz\")):\n # self.creates[event.src_path] = time.time()\n\n log.debug(\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an absolute filename, return the file state of this file if it exists; otherwise create a state. | def findFileState(self, fname):
    # Side comment: If the file state doesn't exist, that probably means the physical file doesn't exist, which could happen if a dependency was created which, upon execution, creates the file (pdflatex creates a .pdf file which could not have been there before)
return next((fi... | [
"def load_resource_file_state(\r\n self, resource: GenomicResource,\r\n filename: str) -> Optional[ResourceFileState]:",
"def open_file_when_exists(self, filename):\n if self.wait_for_file_to_appear(filename):\n return open(filename, \"rU\")\n return None",
"def bu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write out the message in Line delimited JSON. | def write_msg(msg):
msg = json.dumps(msg) + '\n'
logger.debug('write msg: %s', msg)
sys.stdout.write(msg)
sys.stdout.flush() | [
"def writeJson(message, jsonData):\n jsonOutputStr = json.dumps(jsonData, sort_keys=True, indent=4, separators=(',', ': '))\n write(message + jsonOutputStr)",
"def write_jsonl(filename, arr):\n with open(filename, 'w') as f:\n for line in arr:\n f.write(json.dumps(line) + \"\\n\")",
"def format(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a grade to the list | def add(self, grade):
self.gradesList.append(grade) | [
"def add_student(self, name, grade):\n self._roster.setdefault(grade, [])\n self._roster[grade].append(name)\n self._roster[grade].sort()",
"def add_student(self, name, grade):\n if grade not in range(1, 8):\n raise ValueError(\"Grade can't be below 1 or above 7.\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes a grade from the list | def removeGr(self, grade):
try:
self.gradesList.remove(grade)
except ValueError as e:
print(e) | [
"def removeGrForSt(self, studentId):\n try:\n for grade in self.gradesList:\n if grade.getStId() == studentId:\n self.gradesList.remove(self.findGrBySt(studentId))\n except ValueError as e:\n print(e)",
"def removeGrForD(self, disciplineId):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes the grades for a given student | def removeGrForSt(self, studentId):
try:
for grade in self.gradesList:
if grade.getStId() == studentId:
self.gradesList.remove(self.findGrBySt(studentId))
except ValueError as e:
print(e) | [
"def delStudent(self,st,grades):\r\n if grades==[]:\r\n return st.getID()\r\n if grades[0].getStudent()==st:\r\n self.__listNote.remove(grades[0])\r\n self.__storeInFile()\r\n return self.delStudent(st, grades[1:])",
"def removeGr(self, grade):\n try:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes the grades for a given discipline | def removeGrForD(self, disciplineId):
try:
for grade in self.gradesList:
if grade.getDiscId() == disciplineId:
self.gradesList.remove(self.findGrByD(disciplineId))
except ValueError as e:
print(e) | [
"def RemoveD(self, disciplineID):\r\n disciplineToRemove = Discipline(disciplineID, \"\")\r\n self.__disciplineRepository.Remove(disciplineToRemove)\r\n self.__gradeRepository.remove_all_discipline(disciplineID)",
"def removeDiscipline(self, discipline):\r\n self._undo = self._db.getLi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds a student's grade by his ID | def findGrBySt(self, studentId):
for grade in self.gradesList:
if grade.getStId() == studentId:
return grade | [
"def get_student(self, id = 0):\n return self._students[id]",
"def findGrByD(self, disciplineId):\n for grade in self.gradesList:\n if grade.getDiscId() == disciplineId:\n return grade",
"def get_student(students, id):\n for student in students: # linear search := O(n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the grade at a discipline by its ID | def findGrByD(self, disciplineId):
for grade in self.gradesList:
if grade.getDiscId() == disciplineId:
return grade | [
"def findGrBySt(self, studentId):\n for grade in self.gradesList:\n if grade.getStId() == studentId:\n return grade",
"def get_all_grades_by_discipline(self, discipline_id):\n grades = []\n for grade in self.__entities.values():\n index = grade.entity_id.f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the grade is between 1 and 10 | def checkGr(self, grade_value):
    if 1 <= grade_value <= 10:
return grade_value | [
"def grade_statement(score):\n if score < 0:\n return \"Invalid score\"\n else:\n if score >= 90:\n return \"Excellent\"\n elif score >= 50:\n return \"Passable\"\n else:\n return \"Bad\"",
"def score_check(grade, subject, student):\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enrolls a student with a given ID at a discipline with a given ID. Since this is only enrolling, not grading, the grade value is initialized with "none". | def enroll(self, grade):
self.add(Grade(grade.getStId(), grade.getDiscId(), grade_value="none")) | [
"def put(self, id):\n adm = ElectionSystemAdministration()\n g = Grading.from_dict(api.payload)\n\n if g is not None:\n \"\"\"This sets the id of the grading object to be overwritten\"\"\" \n g.set_id(id)\n adm.save_grading(g)\n return '', 200\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a student with a given ID is enrolled at a discipline with a given ID | def checkIfEnr(self, studentId, disciplineId):
ok = 0
for item in self.gradesList:
if item.getStId() == studentId and item.getDiscId() == disciplineId:
ok = 1
if item.getGrValue() == "none":
self.gradesList.remove(self.findGrByD(disciplineI... | [
"def student_exist(self, student_id, exam_id):\n if not self.cnx:\n return None\n\n cursor = self.cnx.cursor()\n cursor.execute(\"SELECT student_id \"\n \"FROM Exam_Results \"\n \"WHERE student_id = ? \\\n AND\\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the average grade for a given student at a given discipline | def getAvgGrForSt(self, studentId, disciplineId):
nr = 0
s = 0
for item in self.gradesList:
if item.getStId() == studentId and item.getDiscId() == disciplineId and item.getGrValue() != 'none':
nr = nr + 1
s = s + float(item.getGrValue())
if nr ... | [
"def getAvgForDisc(self, disciplineId):\n s = 0\n nr = 0\n for item in self.gradesList:\n if item.getDiscId() == disciplineId and item.getGrValue() != \"none\":\n nr = nr + 1\n s = s + float(item.getGrValue())\n \n if nr!= 0:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the average for a given discipline | def getAvgForDisc(self, disciplineId):
s = 0
nr = 0
for item in self.gradesList:
if item.getDiscId() == disciplineId and item.getGrValue() != "none":
nr = nr + 1
s = s + float(item.getGrValue())
    if nr != 0:
return fl... | [
"def getAvgGrForSt(self, studentId, disciplineId):\n nr = 0\n s = 0\n for item in self.gradesList:\n if item.getStId() == studentId and item.getDiscId() == disciplineId and item.getGrValue() != 'none':\n nr = nr + 1\n s = s + float(item.getGrValue())\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the aggregated average grade for a student (the average between their average grades per discipline) | def getAggregatedAvg(self, studentId):
nr = 0
s = 0
for item in self.gradesList:
if item.getStId() == studentId:
avg = self.getAvgGrForSt(item.getStId(), item.getDiscId())
s = s + float(avg)
nr = nr + 1
if nr != 0:
r... | [
"def getAvgGrForSt(self, studentId, disciplineId):\n nr = 0\n s = 0\n for item in self.gradesList:\n if item.getStId() == studentId and item.getDiscId() == disciplineId and item.getGrValue() != 'none':\n nr = nr + 1\n s = s + float(item.getGrValue())\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a given student is failing at a given discipline | def checkIfFailing(self, studentId, disciplineId):
    return self.getAvgGrForSt(studentId, disciplineId) < 5 | [
"def checkIfEnr(self, studentId, disciplineId):\n ok = 0\n for item in self.gradesList:\n if item.getStId() == studentId and item.getDiscId() == disciplineId:\n ok = 1\n if item.getGrValue() == \"none\":\n self.gradesList.remove(self.findGrBy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets all students that are failing at one or more disciplines | def allStFailing(self):
auxList = []
for item in self.gradesList:
        if self.checkIfFailing(item.getStId(), item.getDiscId()):
auxList.append(AllStudents(item.getStId(), item.getDiscId(), self.getAvgGrForSt(item.getStId(), item.getDiscId())))
ret... | [
"def filterGrades(self, student, discipline):\n result = []\n for grade in self.gradesList:\n if student != None and grade.getStId() != student.getId():\n continue\n if discipline != None and grade.getDiscId() != discipline.getId():\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets all students enrolled at a given discipline, sorted by descending order of average grade | def allStEnrolled(self, disciplineId):
auxList = []
auxSt = []
for item in self.gradesList:
if item.getDiscId() == disciplineId and item.getStId() not in auxSt:
auxList.append(AllStudents(item.getStId(), item.getDiscId(), self.getAvgGrForSt(item.getStId(), item.getDis... | [
"def get_students_sorted_by_grade(self):\n if self.students != None:\n # sort students by grade with lambda function\n self.students.sort(key=lambda x: x.grade_level, reverse=False)\n return self.students\n\n # To sort the list in place...\n # ut.sort(key=lambda x: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets all disciplines with at least one grade, sorted by descending order of average grade | def allDisciplines(self):
auxList = []
auxD = []
for item in self.gradesList:
if item.getGrValue() != "none" and item.getDiscId() not in auxD:
auxList.append(AllDisciplines(item.getDiscId(), self.getAvgForDisc(item.getDiscId())))
auxD.append(item.getDi... | [
"def filterGrades(self, student, discipline):\n result = []\n for grade in self.gradesList:\n if student != None and grade.getStId() != student.getId():\n continue\n if discipline != None and grade.getDiscId() != discipline.getId():\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of grades for the given student at the given discipline | def filterGrades(self, student, discipline):
result = []
for grade in self.gradesList:
        if student is not None and grade.getStId() != student.getId():
            continue
        if discipline is not None and grade.getDiscId() != discipline.getId():
continue
resul... | [
"def get_all_grades_by_discipline(self, discipline_id):\n grades = []\n for grade in self.__entities.values():\n index = grade.entity_id.find('.')\n idToCheck = int(grade.entity_id[:index])\n if discipline_id == idToCheck:\n grades.append(grade.grade_val... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the discipline's ID | def getDiscId(self):
return self.disciplineId | [
"def getId(self):\n return self.__disciplineID",
"def getDiscipline(self):\r\n return self.__discipline",
"def id(self):\n return self._dptId",
"def getId(self):\n return self.__studentID",
"def get_id(self):\n return self[\"ds_id\"]",
"def getId(self) -> int:\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the average grade | def getAvg(self):
return self.average_grade | [
"def compute_hw_average(grades):\n return -1",
"def avg_grade(self):\n libStu2Grade= defaultdict(lambda: [])\n for item in self.dataset:\n for i in [1,2,3,4,5,6]:\n grade = item['Rating for Person {}:'.format(i)]\n stu_key = 'What is your name? (Person 1)'.format(i) if i == 1 else 'Per... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints the decision rules from the tree in the format "IF att == val AND ... THEN class = label", one rule on each line. | def print_decision_rules(self, attribute_names=None, class_name="class"):
rules = myutils.extractRules(tree=self.tree, rules=[], stmt='' , previous_value='', class_name=class_name)
for rule in rules:
print(rule) | [
"def print_decision_rules(self, attribute_names=None, class_name=\"class\"):\n att = \"\"\n if attribute_names != None:\n temp = self.tree[1]\n index = int(temp[len(temp) - 1])\n att = attribute_names[index]\n else:\n att = self.tree[1]\n print... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fits a random forest | def fit(self, remainder, N, M):
self.remainder = remainder
self.N = N
self.M = M
self.pruned_forest = myutils.random_forest_generation(remainder, N, M) | [
"def build_random_forests(features, labels, forests_size):\r\n forest = []\r\n # number of features to be used for measurement calculation for each node\r\n feature_batch_size = int(len(features[0]) / 5)\r\n number_of_samples = len(labels)\r\n for i in range(forests_size):\r\n # randomly selec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compare Application Database Settings between two appliances | def compare(isamAppliance1, isamAppliance2):
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[]) | [
"def config_compare(self, config1, config2):\n return self.result_compare(self.driver.results_query(config=config1).one(),\n self.driver.results_query(config=config2).one())",
"def config_compare(self, config1, config2):\n return self.result_compare(\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simply reads in an index file into a list and returns it | def load_index_file(indexfile):
with open(indexfile) as f:
index = f.readlines()
return index | [
"def read_index(index_file):\n index = dict(map(\n lambda x: reversed(x[:-1].split('\\t')),\n index_file\n ))\n return index",
"def read_document_index(user_directory) -> List:\n\n path = os.path.join(user_directory, \"index.json\")\n if not os.path.exists(path):\n return list(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a new dictionary to store all the atoms that are crystalline, icosahedra, etc. | def generate_atom_dict(model):
atom_dict = {}
for atom in model.atoms:
atom_dict[atom.vp.type] = atom_dict.get(atom.vp.type,[]) + [atom]
return atom_dict | [
"def __create_info_dict(self):\n d = ['mtype', 'stype', 'sval']\n keys = ['_'.join(i) for n in range(5) for i in itertools.permutations(d, n) if not len(i) == 0]\n out = {i: {} for i in keys}\n return out",
"def _set_atom_classes(self):\n self._atom_classes = {'unique_old_atoms'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints the index and type of each atom in m | def print_all(m):
for atom in m.atoms:
print("{0} {1} {2}".format(atom, atom.vp.index, atom.vp.type)) | [
"def show_atom_index(self):\r\n try:\r\n self.show_atom_index_judge = True\r\n self.show_atom_element_judge = False\r\n\r\n self.plot(self.Atomsobject)\r\n except Exception as e:\r\n print(e)",
"def printMatrix(*args):\n\n for M in args:\n if typ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Index should be a 4-list, e.g. [0,0,12,0]. This function goes through the model and finds all atoms with index "index" and saves that atom's VP as a new model, with name "temp{atom.id}.cif". | def save_vp_cluster_with_index(m,index):
for atom in m.atoms:
if(atom.vp.index[0:4] == index):
temp_model = Model("VP with index {0}".format(index), m.lx, m.ly, m.lz, atom.neighs+[atom])
temp_model.write_cif("temp{0}.cif".format(atom.id))
print("Saved VP cluster to modelf... | [
"def shapenet_models(params, index: int = 0):\n model_path = \"models/model_normalized.obj\"\n synset = params.synsets[params.category]\n\n model_list = os.listdir(join(params.shapenet_path, synset))\n model_paths = [\n join(params.shapenet_path, synset, c, model_path) for c in model_list\n ]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return feed_ops, feed_run_ops, same as the ptm example code. Not used very much, since it does not improve results. | def feed_ops(self):
if FLAGS.reinforcement_learning:
pass
if FLAGS.feed_initial_sate:
return [self.decoder.initial_state], [self.decoder.final_state]
else:
return [], [] | [
"def run_feeds(*args, **kwargs):\n return list(run_feeds_iter(*args, **kwargs))",
"def get_training_ops(self):\n return self.get_training_op(), self.get_loss(), self.get_accuracy()",
"def _set_task(self):\r\n for k in self.train_tasks:\r\n k_dict = {'train/'+k: \r\n {'tr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if perl deps are missing. | def perl_deps_missing():
global REASON
try:
perl.PerlCheck(misc.Options(verbosity=1))
except SkipOptionalCheck as e:
REASON = str(e)
return True
return False | [
"def _check_dependencies():\n logger.info('Checking program dependencies ...')\n\n if not which('ruby'):\n logger.warn('Ruby not found')\n logger.info('Running apt-get update ...')\n run('apt-get update')\n logger.info('Installing ruby ...')\n run('apt-get install git-core r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ebuilds without DIST_VERSION defined are skipped. | def test_no_dist_version(self):
self.assertNoReport(self.mk_check(), self.mk_pkg('1.7.0')) | [
"def _reject_forge_version_parameter(args):\n if args.forge_version:\n raise CliError('it is not supported to specify --version without '\n '--forge.')",
"def version_uses_new_config () -> bool:\n if app_version_major < 2:\n return False\n else:\n if app_version... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that initialization fails if perl isn't installed. | def test_no_perl(self):
with patch('subprocess.Popen') as popen:
popen.side_effect = FileNotFoundError('perl not available')
with pytest.raises(SkipOptionalCheck) as excinfo:
self.mk_check()
assert 'perl not installed' in str(excinfo.value) | [
"def perl_deps_missing():\n global REASON\n try:\n perl.PerlCheck(misc.Options(verbosity=1))\n except SkipOptionalCheck as e:\n REASON = str(e)\n return True\n return False",
"def module_check():\n\tstatus = True\n\ttry:\n\t\timport fpdf\n\t\tprint '[+] Fpdf module installed.'\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise socket binding exceptions that aren't due to rebinding. | def test_socket_bind_error(self):
with patch('socket.socket') as mock_socket:
mock_socket.return_value.bind.side_effect = OSError(errno.ENOTSOCK, 'foo')
with pytest.raises(OSError) as excinfo:
self.mk_check()
assert excinfo.value.errno == errno.ENOTSOCK | [
"def test_create_client_side_connection_re_raises_uncaught_errors(self):\n session = mock.Mock()\n session._create_client_side_connection = mock.Mock()\n error = socket_error()\n # Some error that isn't EADDRNOTAVAIL\n error.errno = errno.ESOCKTNOSUPPORT\n with mock.patch('... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Imputes missing values found in one or more pandas dataframes using sklearn median imputation. | def median_imputation(dataframes):
return __sklearn_imputation(dataframes, "median") | [
"def impute_missing_values(X):\n col_means=np.nanmean(X,axis=0)\n inds=np.where(np.isnan(X))\n X[inds]=np.take(col_means,inds[1])\n return X",
"def ImputeWithMedian(train_df, test_df, cols=[\"price\", \"year\"]):\n\n # separate to-be-imputed columns\n train_df = pd.DataFrame(train_df[cols])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fourier transform of the sine-Gaussian wavelet. This uses the convention H(f) = integral[ h(t) exp(2 pi i f t) dt] | def sinegauss_FT(f, t0, f0, a):
return (np.sqrt(np.pi / a)
* np.exp(-2j * np.pi * f * t0)
* np.exp(-np.pi ** 2 * (f - f0) ** 2 / a)) | [
"def fft_halfrange_sin(x,axis=-1):\n N=x.shape[axis]\n x2 = np.concatenate([x,-np.flip(x,axis=axis)],axis=axis)\n fft = np.fft.fft(x2,axis=axis)\n sineterms = 1j*(np.take(fft,range(1,N+1),axis=axis)-np.take(fft,range(2*N-1,N-1,-1),axis=axis))\n sineterms[put_at(N-1,axis=axis)] = 1j*np.take(fft,N,axis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Migrate back to the start migration | def migrate_to_start(self):
call_command('migrate', self.django_application, self.start_migration,
verbosity=0) | [
"def _after_migration(self):\n pass",
"def rollback(self):\n\n self.connection.close()\n self.migration.revert()\n self.current_version = self.migration.version - 1\n self.connection = sqlite3.connect(self.db)",
"def _before_migration(self):\n pass",
"def migrate_roll... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts after a client has connected; simply sends the chat history to the client (messages sent since the server was started). | def send_history_to_client(client):
for message in sended_messages:
client.send(bytes(message.prefix+": ", "utf8") + message.text)
time.sleep(0.05) | [
"def chat_main(client):\n\n # import pdb; pdb.set_trace()\n session = Session(client)\n session.loop()",
"def start_chat():\n\n response = requests.post('{}/start?rcs=1&firstevents=1&m=0&randid={}'.format(\n server,\n generate_randid()\n ))\n\n client_id = json.loads(response.text)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a point cloud to a 3D image. | def point_cloud_to_image(cloud: np.ndarray, indices: np.ndarray, properties: conv.ImageProperties):
img_arr = np.zeros(properties.size[::-1]) if cloud.ndim == 1 else \
np.zeros(properties.size[::-1] + (cloud.shape[-1], ))
for idx in range(cloud.shape[0]):
x, y, z = indices[idx]
img_arr[z... | [
"def visualize_pointcloud_new(pointcloud, name, save_path):\n # Open 3D can only store pointcloud as .ply\n save_file_ply = os.path.join(save_path, \"{}.ply\".format(name))\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(pointcloud)\n o3d.io.write_point_cloud(save_file_ply... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms an image index to physical coordinates. The transformation is given by x = D S v + o, where x is the coordinate of the voxel in physical space, v is the voxel index, o is the origin, D is the direction matrix, and S = diag(spacing). | def transform_to_physical_coordinates(index):
return np.matmul(
np.matmul(np.array(properties.direction).reshape(3, 3), np.diag(properties.spacing)),
index) + properties.origin | [
"def TransformPhysicalPointToIndex(self, point: 'itkPointD4') -> \"itkIndex4\":\n return _itkImagePython.itkImageBase4_TransformPhysicalPointToIndex(self, point)",
"def TransformPhysicalPointToIndex(self, point: 'itkPointD3') -> \"itkIndex3\":\n return _itkImagePython.itkImageBase3_TransformPhysical... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method called by streamConnect, streamTap and streamConnectBiDir to access the slave. This method is called to request the data channel. | def getDataChannel(self):
return self._data_slave | [
"def connect_stream(stream):\n return factory.connect_stream(stream, SlaveService)",
"def InitSlave(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def serviceStarted(self):\n self.openChannel(ShellSlaveChannel(conn=self))",
"def connect_thread():\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method called by streamConnect, streamTap and streamConnectBiDir to access the slave. This method is called to request the metadata channel. | def getMetaChannel(self):
return self._meta_slave | [
"def getMetadataStream(self) -> ghidra.app.util.bin.format.pe.cli.streams.CliStreamMetadata:\n ...",
"def getDataChannel(self):\n return self._data_slave",
"def getMetadata(streamName=\"string\", listChannelNames=bool, memberName=\"string\", scene=bool, endIndex=\"string\", channelName=\"string\",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove unused joins from an expression. This only removes joins when we know that the join condition doesn't produce duplicate rows. | def eliminate_joins(expression):
for scope in traverse_scope(expression):
# If any columns in this scope aren't qualified, it's hard to determine if a join isn't used.
# It's probably possible to infer this from the outputs of derived tables.
# But for now, let's just skip this rule.
... | [
"def eliminate_qualify(expression: exp.Expression) -> exp.Expression:\n if isinstance(expression, exp.Select) and expression.args.get(\"qualify\"):\n taken = set(expression.named_selects)\n for select in expression.selects:\n if not select.alias_or_name:\n alias = find_new... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine output columns of `scope` that must have a unique combination per row | def _unique_outputs(scope):
if scope.expression.args.get("distinct"):
return set(scope.expression.named_selects)
group = scope.expression.args.get("group")
if group:
grouped_expressions = set(group.expressions)
grouped_outputs = set()
unique_outputs = set()
for sele... | [
"def column_uniqueness(self):\n line = \"\"\n for column in range(self.size):\n for value in range(self.size):\n for ri, rj in it.combinations(range(self.size), 2):\n line += \"-{}{}{} -{}{}{} 0\\n\".format(ri + 1, column + 1, value + 1, rj + 1, column + 1,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract the join condition from a join expression. | def join_condition(join):
name = join.alias_or_name
on = (join.args.get("on") or exp.true()).copy()
source_key = []
join_key = []
def extract_condition(condition):
left, right = condition.unnest_operands()
left_tables = exp.column_table_names(left)
right_tables = exp.column_... | [
"def join_condition(\n a: FromClause,\n b: FromClause,\n a_subset: Optional[FromClause] = None,\n consider_as_foreign_keys: Optional[AbstractSet[ColumnClause[Any]]] = None,\n) -> ColumnElement[bool]:\n return Join._join_condition(\n a,\n b,\n a_subset=a_subset,\n consider_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
React to a CompletedCommand. | def handle_completed_command(self, command: CompletedCommandType) -> None:
pass | [
"def _completed(self, _button=None):\n self._result.callback(self.prompt.edit_text)",
"def actionCompleted(self):\n pass",
"def send_complete(self) -> None:\n self._set_state(\"COMPLETE\")",
"def job_completed(self, build_job, job_status, build_component):\n raise NotImplementedErr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates trees from nodes whose root is self.root | def gen_root_trees(self, nodes, vss, blacklist, data):
assert (isinstance(nodes, list) and
all(isinstance(t, Tree) and t.is_node for t in nodes)), nodes
assert(vss is None or
(isinstance(vss, list) and
all(isinstance(v, tuple) for v in vss))), vss
... | [
"def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a formula recursively to represent the data structure of tree based on input value v and data. | def gen_formula(self, v:int, data:Dict[str, List]):
if Miscs.is_expr(self.root):
return Z3.parse(str(self.root)) == v
elif isinstance(self.root, str) and special_str in self.root:
# special case {'first_idx':i,'coef':z}
myroot = self.root.replace(special_str, "")
... | [
"def build_tree(data, impurity, p_val=1):\r\n \r\n \r\n ###########################################################################\r\n # TODO: Implement the function. #\r\n ###########################################################################\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Go through each nesting (aexp), generate an SMT formula, and check its satisfiability. | def peelme(self, data):
vi = [[(v, i) for i in idxs]
for v, idxs in data[self.lt.root].items()]
vi = list(itertools.chain(*vi))
# print(data[self.lt.root])
# print(vi)
sts = [self.gen_template(i, len(vi) == 1).rt for _, i in vi]
formula = Z3._and([rh.gen_fo... | [
"def satisfying_assignment(formula):\n assignment = {}\n# for clause in formula:\n# for literal in clause: d \n# if literal[0] not in assignment:\n# assignment[literal[0]] = None\n# else:\n# continue \n if formula == []:\n# print('empty')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the function is commutative >>> assert not ExtFun('sub').commute >>> assert ExtFun('add').commute >>> assert not ExtFun('something').commute | def commute(self):
try:
return ExtFun.d[self][1]
except KeyError:
"""
If we don't know anything about the function, then
the default is non commutative.
"""
return False | [
"def test_connected_gates(self, do_commutative_analysis):\n circuit1 = QuantumCircuit(4)\n circuit1.cx(0, 1)\n circuit1.cx(1, 0)\n circuit1.cx(2, 3)\n circuit1.swap(0, 3)\n\n # collect linear functions\n circuit2 = PassManager(\n CollectLinearFunctions(do_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create representations for extfuns | def gen_ef_data(extfuns, avals):
assert(isinstance(extfuns, list) and extfuns
and all(isinstance(f, str) for f in extfuns)), extfuns
assert(isinstance(avals, set) and
all(isinstance(v, int) for v in avals)), avals
mlog.debug(
f"gen_ef_data({','.join(ex... | [
"def gen_extfuns(cls, tc, xinfo):\n assert isinstance(tc, dict), tc\n assert isinstance(xinfo, XInfo), xinfo\n\n # print(xinfo)\n # print(tc.keys())\n # print(tc)\n extfuns = [ExtFun(x) for x in xinfo.extfuns]\n if not extfuns:\n return []\n\n mlog.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of dicts representing extfuns. The values of the extfuns are customized over the given tc. | def gen_extfuns(cls, tc, xinfo):
assert isinstance(tc, dict), tc
assert isinstance(xinfo, XInfo), xinfo
# print(xinfo)
# print(tc.keys())
# print(tc)
extfuns = [ExtFun(x) for x in xinfo.extfuns]
if not extfuns:
return []
mlog.debug(f"gen_extf... | [
"def GetFunctionToExtensionsMap(extensions):\n function_to_extensions = {}\n for extension, functions in extensions.items():\n for function in functions:\n if not function in function_to_extensions:\n function_to_extensions[function] = set([])\n function_to_extensions[function].add(extension)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of dicts representing extvars | def gen_extvars(cls, xinfo):
assert isinstance(xinfo, XInfo), xinfo
if not xinfo.extvars:
return []
extvars = [cls.parse_extvar(e) for e in xinfo.extvars]
mlog.debug(f"generate {len(extvars)} ext vars: {extvars}")
return extvars | [
"def _get_vm_extension_list(vm_iv):\n extensions = {}\n extension_list = []\n for e in vm_iv.get('extensions', []):\n extension_list.append(e['name'])\n extensions['extensions'] = extension_list\n return extensions",
"def get_vars():\n thevars = {\n 'kernel_version': {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if values in vss can be found from rdata and performs branching if necessary in the case of multiple occurrences. The output is a list of size == dim of rdata. | def reach(vss, rdata):
assert(isinstance(vss, list) and
all(isinstance(vs, tuple) for vs in vss)), vss
rs = [[rdata[v] for v in vs if v in rdata] for vs in vss]
if any(not r for r in rs):
return []
else:
rs = [itertools.chain(*r) for r in rs]
... | [
"def check_indexed_ragged_array(self, ds):\n ret_val = []\n reasoning = []\n for name,var in ds.dataset.variables.iteritems():\n if getattr(var,'instance_dimension',''):\n result = Result(BaseCheck.MEDIUM, \\\n True, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uses speedtest-cli to perform a speedtest. Returns a dictionary with the current time and results. | def get_speed(self):
std_out, _, _ = self.run_command("speedtest-cli --simple", default_asserts=True)
print(std_out)
current_ping = float(std_out[0].replace('Ping: ', '').replace(' ms', ''))
current_download = float(std_out[1].replace('Download: ', '').replace(' Mbit/s', ''))
cu... | [
"def speedtest():\n jret = _run_speedify_cmd([\"speedtest\"], cmdtimeout=600)\n return jret",
"def run_simple_speedtest():\n\t# run speedtest - cli\n\tprint('running test')\n\tspeed = os.popen(\"speedtest-cli --simple\").read()\n\tprint('done')\n\t\n\t# split the 3 line result (ping,down and up)\n\tlines = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes dict with results and compares speed to the defined threshold. Sends tweet if threshold not met. | def check_threshold(self):
custom_tweet = "Your custom tweet here"
results = self.get_speed()
if results['download'] < self.download_threshold:
print("Sending tweet...")
self.send_tweet(custom_tweet) | [
"def analyze_tweet_retweet(self, data):\n\n\t\tlog.info(f\"Analyzing possibility to like retweet with id: <{data['tweet_id']}>\")\n\n\t\theuristic_value = 0\n\t\t# Verify if there's a relation between the bot and the user\n\t\theuristic_value += self._score_for_relation(data)\n\n\t\t# Next we check if the bot and t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a pair of spans and/or span arrays. span1 + span2 == minimal span that covers both spans | def __add__(self, other) -> Union[Span, "TokenSpan", SpanArray, "TokenSpanArray"]:
if isinstance(self, TokenSpan) and isinstance(other, TokenSpan):
# TokenSpan + TokenSpan = TokenSpan
_check_same_tokens(self, other)
return TokenSpan(self.tokens, min(self.begin_token, other.be... | [
"def __add__(self, other) -> Union[\"Span\", \"SpanArray\"]:\n if isinstance(self, Span) and isinstance(other, Span):\n # Span + *Span = Span\n _check_same_text(self, other)\n return Span(self.target_text, min(self.begin, other.begin),\n max(self.end, o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convenience method for building null spans. | def make_null(cls, tokens):
return TokenSpan(
tokens, TokenSpan.NULL_OFFSET_VALUE, TokenSpan.NULL_OFFSET_VALUE
) | [
"def cull(spans: Iterable[ReportSpan], min_length: int = 5) -> Iterable[ReportSpan]:\n for span in spans:\n if not span.length() < min_length:\n yield span",
"def buildNull():\n return NullDamageDelegate()",
"def _shift_spans_to_start_at_zero(spans):\n adjusted_spans = []\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
span1 < span2 if span1.end <= span2.begin | def __lt__(self, other):
if isinstance(other, TokenSpan):
# Use token offsets when available
return self.end_token <= other.begin_token
else:
return Span.__lt__(self, other) | [
"def compare_using_equality(span1, span2):\n for first_token in [t for t in span1 if not t.is_stop]:\n for second_token in [t for t in span2 if not t.is_stop]:\n if first_token.lemma_.lower() == second_token.lemma_.lower():\n return True, first_token, second_token\n return Fal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the given extension array of type ArrowTokenSpanType to a TokenSpanArray. | def __from_arrow__(self, extension_array):
from text_extensions_for_pandas.array.arrow_conversion import arrow_to_token_span
return arrow_to_token_span(extension_array) | [
"def make_array(cls, o) -> \"TokenSpanArray\":\n if isinstance(o, TokenSpanArray):\n return o\n elif isinstance(o, pd.Series):\n return cls.make_array(o.values)\n elif isinstance(o, Sequence):\n return cls._from_sequence(o)\n elif isinstance(o, Iterable):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if scalar item exists in this TokenSpanArray. | def __contains__(self, item) -> bool:
if isinstance(item, TokenSpan) and \
item.begin == TokenSpan.NULL_OFFSET_VALUE:
return TokenSpan.NULL_OFFSET_VALUE in self._begin_tokens
return super().__contains__(item) | [
"def contains(self, spel_pos):\n return spel_pos in self.pointer_array",
"def __contains__(self, item) -> bool:\r\n return item in self.stack",
"def __contains__(self, item: Any) -> bool:\n if len(self.subtrees) == 0:\n return self.value == item\n else:\n for su... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a `TokenSpanArray` object out of any of several types of input. | def make_array(cls, o) -> "TokenSpanArray":
if isinstance(o, TokenSpanArray):
return o
elif isinstance(o, pd.Series):
return cls.make_array(o.values)
elif isinstance(o, Sequence):
return cls._from_sequence(o)
elif isinstance(o, Iterable):
r... | [
"def make_array(cls, o) -> \"SpanArray\":\n if isinstance(o, SpanArray):\n return o\n elif isinstance(o, pd.Series):\n return cls.make_array(o.values)\n elif isinstance(o, Iterable):\n return cls._from_sequence(o)",
"def __add__(self, other) -> Union[Span, \"T... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Align a set of character- or token-based spans to a specified tokenization, producing a `TokenSpanArray` of token-based spans. | def align_to_tokens(cls, tokens: Any, spans: Any):
tokens = SpanArray.make_array(tokens)
spans = SpanArray.make_array(spans)
if not tokens.is_single_document:
raise ValueError(f"Tokens cover more than one document (tokens are {tokens})")
if not spans.is_single_document:
... | [
"def tokenize_and_align_positions(origin_text, start_position, end_position, tokenizer):\n orig_to_tok_index = []\n tok_to_orig_index = []\n tokenized_text = []\n for (i, token) in enumerate(origin_text):\n orig_to_tok_index.append(len(tokenized_text))\n sub_tokens = tokenizer(token)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns (begin, end) pairs as an array of tuples | def as_tuples(self) -> np.ndarray:
return np.concatenate(
(self.begin.reshape((-1, 1)), self.end.reshape((-1, 1))), axis=1
) | [
"def bounds_array_to_tuples(a):\n t = tuple([(lo, hi) for lo, hi in zip(a[0], a[1])])\n return t",
"def tuple(self):\n return self.start.coordinates[0], self.start.coordinates[1], self.end.coordinates[0], self.end.coordinates[1]",
"def bounds_tuples_to_array(t):\n a = np.array([[b[0] for b in t]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an array of the substrings of `target_text` corresponding to the spans in this array. | def covered_text(self) -> np.ndarray:
texts = [
None if self.nulls_mask[i]
else self.target_text[i][self.begin[i]:self.end[i]]
for i in range(len(self))
]
return np.array(texts, dtype=object) | [
"def span_tokenize(self, text):\n return [sl for sl in self._slices_from_text(text)]",
"def sentences_from_text(self, text, realign_boundaries=True):\n sents = [text[sl] for sl in self._slices_from_text(text)]\n if realign_boundaries:\n sents = self._realign_boundaries(sents)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterates over the address-message dictionary and sends the message to the address. If it was not possible to send messages, the thread will first sleep __RETRY_FIRST_TIME and try again. If it was not possible again for some messages, it will sleep __RETRY_ELSE_TIME until __SENDER_MSG_TIME is reached. Then the origin sen... | def __sendAllMessages(self):
#If the origin mail was re-encrypted, addrMsgDict is not None.
#Otherwise it needs to be created from one the prepare methods.
if not self.get_addr_msg_dict():
if self.get_addr_fingerprint_key_inf():
self.__prepareSigAndEncMsg()
... | [
"def __send(self, tosend, source):\n for line in tosend.splitlines(True):\n if self.delay:\n debug('ReplyHandler.send: delay answer for %d ms', self.delay)\n sleep(self.delay / 1000.0)\n debug('ReplyHandler.send(%s): %r', source, line)\n self.req... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare a signed and encrypted message for the sender of the mail. It creates an | def __prepareSigAndEncMsg(self):
try:
#It is necessary to send the distributer keys in the attachment.
if _util.objectsNotNone(self.get_dist_key_idsig(), self.get_dist_keys()):
msg = _util.generateMIMEMsg('mixed', self.get_dist_keys(), None, None, None, None, ... | [
"def __prepareSigMsg(self):\n try: \n userInfoTmp = 'FIRST ERROR: ' + self.get_user_info()\n addressMsgDict = {}\n if self.get_dist_key_idsig() is None:\n (_distKeyIDEnc, distKeyIDSig) = self.gnupg.getKeyIDsFromDist(self.get_dist_addr())\n self.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare a signed message, if any exception occurred in the process and it was not possible to get information about the sender of the mail to encrypt that message. It creates an address | def __prepareSigMsg(self):
try:
userInfoTmp = 'FIRST ERROR: ' + self.get_user_info()
addressMsgDict = {}
if self.get_dist_key_idsig() is None:
(_distKeyIDEnc, distKeyIDSig) = self.gnupg.getKeyIDsFromDist(self.get_dist_addr())
self.set_dist_key... | [
"def __prepareSigAndEncMsg(self): \n \n try:\n #It is necessary to send the distributer keys in the attachment.\n if _util.objectsNotNone(self.get_dist_key_idsig(), self.get_dist_keys()):\n msg = _util.generateMIMEMsg('mixed', self.get_dist_keys(), None, None, No... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If it was not possible to send the re-encrypted mail to some of the recipients of the distributer, it is necessary to inform the origin sender. | def __prepareErrorMSGForSender(self):
dm = DistributerManager.DistributerManager()
dkm = DistributerKeyManager.DistributerKeyManager()
userInfo = 'YOUR MESSAGE FROM ' + self.get_timestamp() + ' COULD NOT SEND TO ' + ', '.join(list(self.get_addr_msg_dict().keys()))
self.set_user_info(user... | [
"def __sendAllMessages(self):\n #If the origin mail was re-encrypted, addrMsgDict is not None.\n #Otherwise it needs to be created from one the prepare methods.\n if not self.get_addr_msg_dict():\n if self.get_addr_fingerprint_key_inf():\n self.__prepareSigAndEncMsg()\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine whether a given list of integer values of arbitrary length is sorted in a given order. | def is_sorted(num_list: List[int], sort_order: SortOrder) -> bool:
    contain_invalid_data = any(not isinstance(i, int)
                               for i in num_list)
if contain_invalid_data \
or not num_list \
or not isinstance(sort_order, SortOrder):
raise TypeError
... | [
"def is_sorted(my_list):\n if type(my_list).__name__ == 'list':\n for i in range(len(my_list) - 1):\n if type(my_list[i]).__name__ == 'int' and my_list[i] < my_list[i+1]:\n continue\n else:\n return False\n return True\n else:\n return F... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove an item (the last one by default) and get its value. Remove the last element if the provided position is greater than the length of the list. Raise ValueError if the list is empty. Raise IndexError if the provided position is negative. | def pop(self, position: int = None):
    if not self._head:
        raise ValueError("Cannot pop from an empty list")
    if position is None:
        position = self._count
    if position < 0:
        raise IndexError("Negative position")
# Find the desired item given a position
... | [
"def test_customListRemove_whenIndexIsLast_shouldRemoveIt(self):\n cl = self.setup_list(1, 2, 3)\n result = cl.remove(2)\n\n self.assertEqual(3, result)",
"def remove_item_by_position(self, position: int) -> Union[Item, None]:\n if 0 <= position < len(self.items):\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
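The `pop` record above ends just as the traversal begins. A self-contained sketch of a singly linked list with that contract (ValueError on empty, IndexError on a negative position, last element by default); the node and attribute names are assumptions, not the original implementation:

```python
# Hypothetical singly linked list along the lines of the record above.
class _Node:
    def __init__(self, value, next=None):
        self.value = value
        self.next = next


class LinkedList:
    def __init__(self):
        self._head = None
        self._count = 0

    def push(self, value):
        self._head = _Node(value, self._head)
        self._count += 1

    def pop(self, position=None):
        if not self._head:
            raise ValueError("Cannot pop from an empty list")
        if position is None or position > self._count:
            position = self._count  # default to the last element
        if position < 0:
            raise IndexError("Negative position")
        # Walk to the node just before the requested one and unlink it.
        if position <= 1:
            node, self._head = self._head, self._head.next
        else:
            prev = self._head
            for _ in range(position - 2):
                prev = prev.next
            node, prev.next = prev.next, prev.next.next
        self._count -= 1
        return node.value


ll = LinkedList()
for v in (3, 2, 1):
    ll.push(v)       # list is now 1 -> 2 -> 3
print(ll.pop())      # 3 (last element by default)
print(ll.pop(1))     # 1 (first element)
```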
This method parses the result of the certinfo command. | def _parse_cert(command_result):
not_before = datetime.datetime.strptime(command_result['validity']['notBefore'], '%b %d %H:%M:%S %Y %Z')
not_after = datetime.datetime.strptime(command_result['validity']['notAfter'], '%b %d %H:%M:%S %Y %Z')
utc_ts = datetime.datetime.utcnow()
trusted_result = _is_trus... | [
"def _parse_cert(self):\n self.extensions = []\n\n PARSING_ALT_NAMES = False\n PARSING_HEX_SERIAL = False\n for line in self.cert_string.split('\\n'):\n l = line.strip()\n if PARSING_ALT_NAMES:\n # We're parsing a 'Subject Alternative Name' line\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
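The date handling in `_parse_cert` is worth a small worked example: OpenSSL-style `notBefore`/`notAfter` strings such as `Mar  1 12:00:00 2024 GMT` parse with the `'%b %d %H:%M:%S %Y %Z'` format the record uses. The `command_result` dict below is a made-up stand-in for the certinfo output:

```python
import datetime

# Hypothetical certinfo output; only the validity section is sketched here.
command_result = {
    'validity': {
        'notBefore': 'Mar  1 12:00:00 2024 GMT',
        'notAfter': 'Mar  1 12:00:00 2034 GMT',
    }
}

fmt = '%b %d %H:%M:%S %Y %Z'
not_before = datetime.datetime.strptime(command_result['validity']['notBefore'], fmt)
not_after = datetime.datetime.strptime(command_result['validity']['notAfter'], fmt)

# Mirror the record's UTC comparison to decide whether the cert is in its window.
utc_ts = datetime.datetime.utcnow()
print(not_before <= utc_ts <= not_after)
```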
This method parses the information of the tested cipher suites for one SSL protocol. | def _parse_ciphers(result, protocol, public_key_size):
ciphers_list = []
key_status_list = [
('preferredCipherSuite', 'preferred'),
('acceptedCipherSuites', 'accepted'),
('errors', 'error'),
('rejectedCipherSuites', 'rejected')
]
protocol_dict = dict(
tlsv1='... | [
"def get_ssl_cipher_suites(self):\n return self._ssl_ciphers",
"def test_protocol_sslv23(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try:\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
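The core move in `_parse_ciphers` is flattening a status-keyed result into one tagged list. A plain-data sketch of that step; the `result` dict is a made-up stand-in for one protocol's scan output:

```python
# Hypothetical scan output grouping cipher suites by outcome.
result = {
    'preferredCipherSuite': ['TLS_AES_128_GCM_SHA256'],
    'acceptedCipherSuites': ['TLS_AES_256_GCM_SHA384'],
    'errors': [],
    'rejectedCipherSuites': ['TLS_RSA_WITH_RC4_128_SHA'],
}

key_status_list = [
    ('preferredCipherSuite', 'preferred'),
    ('acceptedCipherSuites', 'accepted'),
    ('errors', 'error'),
    ('rejectedCipherSuites', 'rejected'),
]

# Tag every suite with its status, in the order the status list defines.
ciphers_list = [(name, status)
                for key, status in key_status_list
                for name in result.get(key, [])]
print(ciphers_list)
```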
Show the list of endpoints; these can be used as args for the 'curl' command | def do_get_endpoints(cs, args):
out = get_auth_response(args)
if args.debug:
json.loads(out)
pretty_print(out)
data = json.loads(out)
services = data['access']['serviceCatalog']
#cache output for auto-complete
cache = True
try:
home = expanduser("~") + "/.raxcu... | [
"def show_list(self):\n self._init_repo()\n for filename in glob(self.app.get_home_path() + '/.*'):\n endpoint = Endpoint(self.app, filename)\n if endpoint.is_visible():\n print(endpoint.path)",
"def list_endpoints(self, **kwargs):\n\n all_params = ['prett... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
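For context on what `do_get_endpoints` walks through: a Keystone v2 auth response carries `access.serviceCatalog`, where each service lists endpoints with a `publicURL`. A sketch over made-up catalog JSON:

```python
import json

# Hypothetical, minimal auth response; real catalogs carry many more fields.
out = json.dumps({
    "access": {
        "serviceCatalog": [
            {"name": "cloudServers",
             "endpoints": [{"region": "DFW", "publicURL": "https://dfw.servers.example/v2"}]},
            {"name": "cloudFiles",
             "endpoints": [{"region": "ORD", "publicURL": "https://ord.files.example/v1"}]},
        ]
    }
})

data = json.loads(out)
for service in data['access']['serviceCatalog']:
    for endpoint in service['endpoints']:
        print(service['name'], endpoint.get('region', ''), endpoint['publicURL'])
```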
Set the API key stored in the keychain for the username | def do_set_api_key(cs, args):
capture_password(args.username) | [
"def set_api_key(self, key):\n self.api_key = key",
"def user_api_key(self, user_api_key):\n\n self._user_api_key = user_api_key",
"def set_key(self, apikey):\n force = os.getenv(\"FORCE_BRIDGES_APIKEY\", \"\")\n if (force != \"\"):\n apikey = force\n self._key = ap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
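The record's `capture_password` helper isn't shown. A common way to store a per-user API key in the OS keychain is the third-party `keyring` package (`pip install keyring`); the service name here is just a placeholder:

```python
import getpass

import keyring  # third-party; talks to the platform keychain


def capture_password(username):
    # Prompt without echoing, then persist under a (placeholder) service name.
    api_key = getpass.getpass('API key for %s: ' % username)
    keyring.set_password('raxcurl', username, api_key)


def fetch_api_key(username):
    return keyring.get_password('raxcurl', username)  # None if nothing stored
```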
Execute a curl GET command | def do_get(cs, args):
url = args.url
#translate the endpoint into an actual url
(endpoint, token) = get_endpoint_and_token(args)
curl_args = ''
if url:
curl_args = endpoint + url
curl_args = curl_args + " -H \"X-Auth-Token: " + token + "\""
out = curl(args, curl_args)
if ... | [
"def _cf_curl_get(path):\n cmd_output = cf_cli.get_command_output(CF_CURL + [path])\n response_json = json.loads(cmd_output)\n if 'error_code' not in response_json:\n return response_json\n else:\n raise cf_cli.CommandFailedError('Failed GET on CF API path {}\\n'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
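The string assembly in `do_get` is easy to see in isolation. A sketch with hard-coded stand-ins for the endpoint and token that the real command resolves from the service catalog and auth response:

```python
import shlex

endpoint = 'https://dfw.servers.example/v2'  # placeholder resolved endpoint
token = 'example-token'                      # placeholder auth token
url = '/servers'

curl_args = endpoint + url + ' -H "X-Auth-Token: ' + token + '"'
command = 'curl -s ' + curl_args
print(command)
print(shlex.split(command))  # the argv a subprocess call would receive
```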
Execute a curl POST command (PIPE in data via STDIN) | def do_post(cs, args):
url = args.url
#translate the endpoint shortcut into an actual url
(endpoint, token) = get_endpoint_and_token(args)
curl_args = ''
if url:
curl_args = endpoint + url
curl_args = curl_args + " -H \"X-Auth-Token: " + token + "\""
curl_args = curl_args + " -H ... | [
"def post_data(data, url, curl_command=\"curl\", use_curl=False,\n content_type=\"text/xml\", path=None, use_chunked=False,\n is_odk=False, attachments=None):\n attachments = attachments or []\n results = None\n errors = None\n\n if path is not None:\n with open(path, 'r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute a curl DELETE command | def do_delete(cs, args):
url = args.url
#translate the endpoint shortcut into an actual url
(endpoint, token) = get_endpoint_and_token(args)
curl_args = ''
if url:
curl_args = endpoint + url
curl_args = curl_args + " -H \"X-Auth-Token: " + token + "\""
curl_args = curl_args + " -... | [
"def test_request_delete(self):\n r = self.base._request('/delete', 'DELETE', {\n 'foo': 'bar'\n })\n self.assertEqual(r['url'], 'https://httpbin.org/delete?foo=bar')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
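Assuming the three verbs share the assembly sketched above, POST and DELETE differ only in the extra curl flags: `-X POST --data-binary @-` reads the request body from stdin (the "PIPE in data via STDIN" note), while DELETE just sets the method. A sketch of the diverging argument strings, again with placeholder values:

```python
endpoint = 'https://dfw.servers.example/v2'  # placeholder endpoint
token = 'example-token'                      # placeholder token
url = '/servers'

base = endpoint + url + ' -H "X-Auth-Token: ' + token + '"'
post_args = base + ' -H "Content-Type: application/json" -X POST --data-binary @-'
delete_args = base + ' -X DELETE'
print(post_args)
print(delete_args)
```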
This returns a FileDialogButton class that will call the specified function with the resulting file. | def get(fn,filter='*'):
class FileDialogButton(Tkinter.Button):
# This is just an ordinary button with special colors.
def __init__(self, master=None, cnf={}, **kw):
'''when we get a file, we call fn(filename)'''
self.fn = fn
self.__toggle... | [
"def fileBrowserDialog(fileType=\"string\", mode=int, actionName=\"string\", includeName=\"string\", operationMode=\"string\", fileCommand=\"string\", tipMessage=\"string\", dialogStyle=int, filterList=\"string\", windowTitle=\"string\"):\n pass",
"def fileDialog2(caption=\"string\", selectionChanged=\"string\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a slider [low,high] tied to variable. | def slider(self, parent, variable, low, high, label):
widget = Scale(parent, orient='vertical',
from_=high, to=low, # range of slider
# tickmarks on the slider "axis":
tickinterval=(high-low)/5.0,
# the steps of the counter above the slider:
resolution=(high-lo... | [
"def range_slider(\n name: str,\n label: Optional[str] = None,\n min: Optional[float] = None,\n max: Optional[float] = None,\n step: Optional[float] = None,\n min_value: Optional[float] = None,\n max_value: Optional[float] = None,\n disabled: Optional[bool] = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
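A self-contained version of the slider record, since that one is cut off mid-option list. It ties a `tkinter.Scale` to a `DoubleVar` as sketched above; the label text and range are placeholders, and running it needs a display:

```python
import tkinter as tk

root = tk.Tk()
value = tk.DoubleVar(value=0.0)
low, high = 0.0, 10.0

scale = tk.Scale(root, orient='vertical',
                 from_=high, to=low,               # high value at the top
                 tickinterval=(high - low) / 5.0,  # tickmarks on the slider axis
                 resolution=(high - low) / 100.0,  # step of the readout counter
                 label='amplitude',                # placeholder label
                 variable=value)
scale.pack()
root.mainloop()
```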
Make directory name box | def mkdnb(self):
return self.createcomponent(
'dirnamebox',
(), None,
Pmw.ScrolledListBox, (self.interior(),),
label_text='directories',
labelpos='n',
hscrollmode='none',
dblclickcommand=self.selectdir) | [
"def directorybox(self, text=\"Choose Directory\", slot=None, default=True):\n if slot:\n self.directorybutton = NXPushButton(text, slot)\n else:\n self.directorybutton = NXPushButton(text, self.choose_directory)\n self.directoryname = NXLineEdit(parent=self)\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make filename list box | def mkfnb(self):
return self.createcomponent(
'filenamebox',
(), None,
Pmw.ScrolledListBox, (self.interior(),),
label_text='files',
labelpos='n',
hscrollmode='none',
selectioncommand=self.singleselectfile,
dblclickcommand=self.selectfile) | [
"def insert_files():\n filenames = filedialog.askopenfilenames(multiple = True)\n for file in filenames:\n listbox.insert( 'end', file)",
"def set_file_names(self):\n pg = self.notebook.get_nth_page(1)\n pg.set_border_width(5)\n alignment = gtk.Alignment(0, 0)\n frame = gt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert text v into the entry and at the top of the list of the combobox w, removing duplicates | def tidy(self,w,v):
if not v:
return
entry=w.component('entry')
entry.delete(0,'end')
entry.insert(0,v)
list=w.component('scrolledlist')
list.insert(0,v)
index=1
while index<list.index('end'):
k=list.get(index)
if k==v or index>self['historylen']:
list.delete(index)
else:
index=index+1
... | [
"def set_uniq_data_to_listwidget(Table_obj, listwidget_obj, column_name):\n listwidget_obj.clear()\n unique = []\n for column in range(Table_obj.columnCount()):\n if Table_obj.horizontalHeaderItem(column).text() == column_name:\n for row in range(Table_obj.rowCount()):\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
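Stripped of the Pmw widget calls, `tidy` maintains a simple invariant: the new value goes to the top of a history list, duplicates of it disappear, and the list is capped at `historylen` entries below the top. A plain-list sketch of that invariant (`historylen` is a stand-in for the widget option):

```python
def tidy_history(history, value, historylen=10):
    # Put value first, drop duplicates of it, cap the total length.
    if not value:
        return history
    history = [value] + [v for v in history if v != value]
    return history[:historylen + 1]


hist = ['b', 'a', 'c']
print(tidy_history(hist, 'a'))  # ['a', 'b', 'c'] - moved to top, duplicate gone
```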