query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict)
|---|---|---|---|
Return all siblings of this person. Siblings are considered to be people with at least one common parent. | def _get_siblings(self, gender):
siblings = set()
for parent in self._get_known_parents():
siblings |= set(parent.children(gender))
siblings -= {self}
return siblings | [
"def get_siblings(self, person):\n for p in self.get_parents(person):\n for c in self.get_children(p):\n if c != person:\n yield c",
"def siblings(self) -> QuerySet['TreeModel']:\n return self.__class__.objects.exclude(id=self.id).filter(parent_id=self.pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of all brothers of this person. | def get_brothers(self):
return list(self._get_siblings(self.MALE)) | [
"def get_children(self, person):\n ret = []\n for f in self.families:\n if person in f.parents:\n for p in f.children:\n ret.append(p)\n return sorted(ret, key=lambda x: x.ubirth)",
"def others(self):\n return [b for b in self.boids if b is ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of all sisters of this person. | def get_sisters(self):
return list(self._get_siblings(self.FEMALE)) | [
"def get_all_students(self):\n return self.__student_repository.get_all()",
"def listSpecies(self):\n rows = yield self._db.runQuery(self._listSpeciesSQL)\n returnValue([name for (name,) in rows])",
"def senator_list():\n\n senators = Senator.query.order_by(Senator.state).all()\n\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all children of this person, optionally filtered by gender. | def children(self, gender=None):
if gender:
return [child for child in self._children if
child.gender == gender]
else:
return self._children | [
"def get_children(self, person):\n ret = []\n for f in self.families:\n if person in f.parents:\n for p in f.children:\n ret.append(p)\n return sorted(ret, key=lambda x: x.ubirth)",
"def _get_siblings(self, gender):\n siblings = set()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to guess what the user might have done by heuristically looking at cursor movement, the number of changed lines, and whether they got longer or shorter. This will detect most simple movements like insertion, deletion of a line, or a carriage return. | def guess_edit(initial_line, lt, ct, vs):
if not len(lt) and not len(ct): return True, ()
pos = vs.pos
ppos = vs.ppos
if len(lt) and (not ct or (len(ct) == 1 and not ct[0])): # All text deleted?
es = []
if not ct: ct = ['']
for i in lt:
es.append(("D", initial_line, ... | [
"def check_cursor(self):\n\n view = self.view\n doc = view.document\n\n maxcol, maxrow = self.get_textbox_dimensions()\n y, x = view.cursor_pos\n topy, topx = view.scroll_pos\n\n # This should never happen, but for some reason it does, intermittently.\n # Cannot figu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a sentence, replace each word in the sentence with its stemmed form. | def stem_sentence(sentence):
porter = PorterStemmer()
words = word_tokenize(sentence)
stemmed_words = []
for word in words:
stemmed_words.append(porter.stem(word))
stemmed_words.append(" ")
return "".join(stemmed_words) | [
"def _stem_words(stemmer, words):\n return [stemmer.stem(word.lower()) for word in words]",
"def stem_text(text):\n text = utils.to_unicode(text)\n stemmer = PorterStemmer()\n return ' '.join(stemmer.stem(word) for word in text.split())",
"def tokenize_keywords(self, sentence):\n line = sente... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a paragraph, return a paragraph in which each word is stemmed. | def stem_paragraph(paragraph):
stemmed_sentence = []
for sentence in paragraph.split("\n"):
stemmed = stem_sentence(sentence)
stemmed_sentence.append(stemmed)
stemmed_sentence.append("\n")
return "".join(stemmed_sentence) | [
"def get_stem(word):\r\n #stub\r\n #PLACEHOLDER\r\n\r\n ps = PorterStemmer()\r\n \r\n return word",
"def stem(word):\n global _stemmer\n if _stemmer is None:\n _stemmer = nltk.stem.porter.PorterStemmer()\n return _stemmer.stem(word)",
"def stem_and_filter(self, text):\n tok... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns < 0 if pair 1 has a sooner arrival | def sooner_arrival(pair1, pair2):
return pair1[1] - pair2[1] | [
"def is_successor(pair):\n new = fmri.PkgFmri(pair.new)\n old = fmri.PkgFmri(pair.old)\n return new.is_successor(old)",
"def readyForNewRoute():\r\n if cLoca == cDest & cStat == ready & nDest == 0:\r\n return 1\r\n else:\r\n return 0",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Guess database name from GFF file. | def guess_database(args):
return _guess_database_file(args.gtf, args.database) | [
"def get_database_name(database):\n return _db_names[database]",
"def read_database_name():\n with open(\"model/database_name.json\") as json_file:\n database = json.load(json_file)\n return database[\"DATABASE\"]",
"def obtain_database_name():\n\n while True:\n # Obtaining... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the ID to identify the primary transcript in the GTF file with the miRNA and precursor coordinates to be able to parse BAM files with genomic coordinates. | def get_primary_transcript(database):
if database.find("miRBase") > -1:
return "miRNA_primary_transcript"
else:
raise ValueError("Only miRBase is supported for this action.") | [
"def id(self):\n return self.transcript_id",
"def get_file_id(self):\n return self.get_standard_id(separator='-')",
"def stomate_id(self):\n return self.identifier[0]",
"def getCurrentUtteranceId(self):\n c = self.textCursor()\n if c == None:\n return \"\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load GTF file with precursor positions on genome. Return dict with key being precursor name and value a dict of mature miRNAs with relative positions to the precursor. | def read_gtf_to_precursor(gtf):
if not gtf:
return gtf
if _guess_database_file(gtf).find("miRBase") > -1:
mapped = read_gtf_to_precursor_mirbase(gtf)
elif _guess_database_file(gtf).find("MirGeneDB") > -1:
mapped = read_gtf_to_precursor_mirgenedb(gtf)
else:
logger.info("Da... | [
"def read_gtf_to_precursor_mirbase(gtf, format=\"precursor\"):\n if not gtf:\n return gtf\n db = defaultdict(list)\n db_mir = defaultdict(list)\n id_dict = dict()\n with open(gtf) as in_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load GTF file with precursor positions on genome. Return dict with key being precursor name and value a dict of mature miRNAs with relative positions to the precursor. For MirGeneDB and similar GFF3 files. | def read_gtf_to_precursor_mirgenedb(gtf, format="precursor"):
if not gtf:
return gtf
db = defaultdict(list)
db_mir = defaultdict(list)
id_dict = dict()
with open(gtf) as in_handle:
for line in in_handle:
if line.startswith("#"):
continue
cols =... | [
"def read_gtf_to_precursor(gtf):\n if not gtf:\n return gtf\n if _guess_database_file(gtf).find(\"miRBase\") > -1:\n mapped = read_gtf_to_precursor_mirbase(gtf)\n elif _guess_database_file(gtf).find(\"MirGeneDB\") > -1:\n mapped = read_gtf_to_precursor_mirgenedb(gtf)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load GTF file with precursor positions on genome. Return dict with key being precursor name and value a dict of mature miRNAs with relative positions to the precursor. For miRBase and similar GFF3 files. | def read_gtf_to_precursor_mirbase(gtf, format="precursor"):
if not gtf:
return gtf
db = defaultdict(list)
db_mir = defaultdict(list)
id_dict = dict()
with open(gtf) as in_handle:
for line in in_handle:
if line.startswith("#"):
continue
cols = l... | [
"def read_gtf_to_precursor(gtf):\n if not gtf:\n return gtf\n if _guess_database_file(gtf).find(\"miRBase\") > -1:\n mapped = read_gtf_to_precursor_mirbase(gtf)\n elif _guess_database_file(gtf).find(\"MirGeneDB\") > -1:\n mapped = read_gtf_to_precursor_mirgenedb(gtf)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the article's raw text and span offsets given the article ID. | def read_article(article_id):
article_fname = "../datasets/train-articles/article" + str(article_id) + ".txt"
label_fname = (
"../datasets/train-labels-task1-span-identification/article"
+ str(article_id)
+ ".task1-SI.labels"
)
with open(article_fname, newline="\n") as article:
... | [
"def textblocks_by_id(xml_data, font_id):\n text_elements=xml_data.findall(\n 'page[@number=\"1\"]/text[@font=\"%s\"]' % font_id)\n first_page_top=int(xml_data.findall('page[@number=\"1\"]')[0].get('top'))\n first_page_height=int(xml_data.findall(\n 'page[@number=\"1\"]')[0].get('height'))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for uploading data through a form or an uploaded file. Will accept any file format supported by the parser. The form itself sends experiment data and metadata as JSON. | def upload(request):
company = request.user.company
if request.method == "POST":
exp_form = ExperimentForm(request.POST, prefix='exp', company=company)
file_form = FileUpload(request.POST, request.FILES, prefix='file')
exp_data = ExperimentDataForm(request.POST, prefix='exp_data', compa... | [
"def upload_file_from_form():\n global api\n if request.method == 'POST':\n try:\n files = request.files['files']\n api.shared_folder_manager.save_uploaded_file_from_form(files)\n return make_response(jsonify({'message': '{0} uploaded'.format(files.filename)}), 200)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stage is not using the data format library. | def test_data_formats(): | [
"def processDataBeforeStage(self, stream, data):\n if stream == 'adcp':\n data = [datetime.datetime.fromtimestamp(data[0]), data[1], data[2]]\n indices = self.target_space.append_entry(stream, data)\n elif stream == 'pamguard':\n # comm format matches desired, no need ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test does not use multithreading | def test_multithreading(): | [
"def test_parallel(self):\n try:\n self.test_simple_text(cores=6)\n except AssertionError:\n ...\n except Exception as e:\n raise AssertionError(\"Simple Test breaks when thread count is increased (Or it was broken from the getgo).\")",
"def testBasic(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a mapping, per confusable, that contains all the groups in the cp's whole confusable excluding the confusable extent of the cp itself as per the spec at | def _construct_whole_confusable_map() -> Dict[int, Set[str]]:
whole_map: Dict[int, Set[str]] = {}
for whole in NORMALIZATION_SPEC["wholes"]:
whole_confusables: Set[int] = set(whole["valid"] + whole["confused"])
confusable_extents: List[Tuple[Set[int], Set[str]]] = []
for confusable_cp i... | [
"def make_group_map(self):\n \n # set the map to nil, then fill it with rows of \n # spaces to represent the walls in the map\n self.group_map = []\n for _ in range(0, self.height):\n temp = []\n for _ in range(0, self.width):\n temp += [' ']\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate tokens and return the label type. | def _validate_tokens_and_get_label_type(tokens: List[Token]) -> str:
if all(token.type == TokenType.EMOJI for token in tokens):
return "emoji"
label_text = "".join(token.text for token in tokens)
concat_text_tokens_as_str = "".join(
t.text for t in tokens if t.type == TokenType.TEXT
)
... | [
"def token_type(self):\n return self._token",
"def get_label_type(data):\n return get_label_for(data, \"type: \")",
"def label():\n return _make_type(_core.LLVMLabelType(), TYPE_LABEL)",
"def _convert_dict_to_token(self, input):\n type = input.get('type')\n data = input.get('dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks predecessors of a node. | def check_predecessors(node):
if not isinstance(node, Gear):
return False
pred_ = self._graph.predecessors(node)
all_inputs = [True if isinstance(p, GearInput) else False for p in pred_]
return all(all_inputs) or not all_inputs | [
"def no_predecessors_iter(self):\n for n in self.nodes:\n if not len(list(self.predecessors(n))):\n yield n",
"def predecessors(self, node: str):\n predecessors = []\n for symbol in ['A', 'T', 'C', 'G']:\n candidate = symbol + node[:self.k - 1]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all inputs with values of a graph. | def inputs(self) -> dict:
inputs = {
node.name: node.value
for node in self._graph.nodes
if isinstance(node, GearInput)
}
return inputs | [
"def inputs(self):\n return self.node.inputs",
"def getInputNodes(self, nodeName):\n node = self.getNode(nodeName)\n inNodes = []\n for inp in node.input:\n if len([nde for nde in self.graph.node if inp in nde.output]):\n inNodes += [inp]\n elif len... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set input data for the graph computation. | def _set_input(self, input_data: dict):
if input_data.keys() != self.input_shape.keys():
raise ValueError("input data is wrong format - check `network.input_shape`")
inputs = {
node.name: node for node in self._graph.nodes if isinstance(node, GearInput)
}
for na... | [
"def set_input(self, input_node_name):\n self.input_node_name = input_node_name\n self.input = self.get_op(self.input_node_name)",
"def _set_tensor(self, indata):\n\n if isinstance(indata, np.ndarray):\n if indata.ndim == 2 or indata.ndim == 3:\n self.data = indata\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attach input to the gear. | def _attach_input(self, param: inspect.Parameter, dst: Gear) -> GearInput:
value = param.default if param.default != param.empty else None
annotation = param.annotation if param.annotation != param.empty else Any
gear_input = GearInput(
param.name, value, annotation=annotation, grap... | [
"def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))",
"def add_input_arg(self, inp):\n self.add_arg(inp._dax_repr())\n self._add_input(inp)",
"def add_input(self, input_):\n self.sigma.add(input_)",
"def add_input(self):\n while True:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attach output to the gear. | def _attach_output(
self, src_gear: Gear, name: str = None, graph_output: bool = False
) -> Union[GearOutput, GearInputOutput]:
if not name:
name = f"{str(src_gear)}"
if graph_output:
src_gear_output = GearOutput(
name, None, src_gear.output_type, gra... | [
"def _add_output(self, out):\n self._outputs += [out]\n out.node = self\n out._set_as_output_of(self)",
"def add_output_arg(self, out):\n self.add_arg(out._dax_repr())\n self._add_output(out)",
"def specify_output(self, io_comp):\n assert(isinstance(io_comp, IO))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add gear to the graph. | def _add_gear(self, gear: Gear):
gear.set_graph(self._graph)
for name, param in gear.params.items():
if param.default and isinstance(param.default, Depends):
src_gear = param.default.gear
src_gear_output = self._attach_output(src_gear, name=name)
... | [
"def add_rocket(self, rocket):\r\n\t\tself.rockets.append(rocket)",
"def add_edge(self, v1, v2):\n self.__graph[v1].append(v2)",
"def add_gear(\n self,\n authorizer_rfid,\n gear_rfid,\n geartype,\n gear_image,\n *required_certs,\n is_new=True,\n **i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a copy of an `Network` instance. | def copy(self) -> "Network":
return Network(self._graph.name, outputs=self._outputting_nodes) | [
"def clone(self, name: str = None) -> \"Network\":\n # pylint: disable=protected-access\n net = object.__new__(Network)\n net._init_fields()\n net.name = name if name is not None else self.name\n net.static_kwargs = util.EasyDict(self.static_kwargs)\n net._build_module_src ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an unsaved image instance for the given ImageFile and title. Override this if your image model requires extra metadata to be filled in, or you want to assign it to a specific collection | def _create_image(self, file, title=None):
return Image(file=file, title=title) | [
"def _get_image(self):\n attr_name = '_image_cache'\n if not getattr(self, attr_name, None):\n self.seek(0)\n setattr(self, attr_name, Image.open(self.file))\n return getattr(self, attr_name)",
"def get_model(image_id):\n image = Image.query.filter_by(id=image_id).fir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of all nonabstract page models that inherit from WebStoryPageMixin | def get_story_page_models():
return [
model for model in get_page_models()
if issubclass(model, WebStoryPageMixin)
] | [
"def _wikipedia_Page_templatePages(self):\n return [template for template in toolserver.Generators.getTemplatelinks(self)]",
"def test_is_abstract_base_class(self):\n self.assertIsInstance(template.Page, abc.ABCMeta)",
"def content_extensions(self):\n for cls in self.__class__.__bases__:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is called from C++ when inference ends. | def end(self, inference):
print("end") | [
"def finalizeInference(self):\n assert False, 'abstract method called'",
"def infer(self):",
"def infinite_infer_run(): \n try:\n model_directory = \"/opt/awscam/artifacts/\"\n # model_name = \"mnist-8\" # onnx-model\n model_name = \"fingerModel.onnx\" # onnx-model\n\n # Cre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read a message from the server. Returns a tuple of the message type and (if required) a dict with data. | def readMessage(self):
message_type_raw = self.server_socket.recv(1)
message_len_raw = self.server_socket.recv(1)
message_type = struct.unpack('>B', message_type_raw)[0]
message_len = struct.unpack('>B', message_len_raw)[0]
if message_len == 0:
message_data = bytearr... | [
"def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_signature':\n # NOTE: right now we're only handling fours\n if msg.numerator == 4 and msg.denominator == 4:\n data = _dict_update(\n data,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the draw date and line as a string. | def as_string(self):
return self.draw_date.isoformat() + ' ' + self.line.as_string() | [
"def get_print(self):\n return ('Trip\\n\\tstart date: {}\\n\\tfinal date: {}\\n\\tgasoline: {}'.\n format(time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.start_date)),\n time.strftime(\"%Y.%m.%d %H:%M\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all sets of balls for this lottery. | def get_sets_of_balls(self):
LOGGER.debug("LEM:gsob")
return [self._main_balls, self._lucky_stars] | [
"def get_sets_of_balls(self):\n return [self._balls]",
"def mixBalls(self):\n balls = self.generateBalls()\n # Extract balls\n allBalls = [ balls[i][j] for j in range(len(balls[0])) for i in range(len(balls)) ]\n shuffle(allBalls)\n return allBalls",
"def generateBalls(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a tuple containing the sets of balls in the given date range. The result is used for frequency counting so only the number of each ball is returned. | def get_balls_in_date_range(self, oldest_date, newest_date):
LOGGER.debug("LEM:gbidr, len %d, from %s to %s", len(self.draws),
str(oldest_date), str(newest_date))
main_balls = []
lucky_stars = []
for lottery_draw in self.draws:
if lottery_draw.draw_date >... | [
"def get_draws_in_date_range(self, date_from, date_to):\n lottery_draws = []\n # LOGGER.info(date_from, date_to)\n for lottery_draw in self.draws:\n if lottery_draw.draw_date >= date_from \\\n and lottery_draw.draw_date <= date_to:\n # LOGGER.info(\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that creates a Deployment Manager profile for IBM WebSphere ND installations. | def make_managerProfile(module):
if module.params['security'] == 'enabled':
module.params['security'] = 'true'
else:
module.params['security'] = 'false'
if module.params['cell_name'] is not None:
create_dmgr_account = """{0}/bin/manageprofiles.sh -create -templatePath \
{0}/profileT... | [
"def make_customProfile(module):\n\n create_custom_profile = \"{0}/bin/manageprofiles.sh -create \\\n-templatePath {0}/profileTemplates/managed/ \\\n-dmgrAdminUserName {1} -dmgrAdminPassword {2} \\\n-profileRoot {3} -profileName {4} -dmgrHost {5}\".format(module.params['path'],\nmodule.params['admin_user'], modu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that creates a custom profile for an IBM WebSphere ND cell. | def make_customProfile(module):
create_custom_profile = "{0}/bin/manageprofiles.sh -create \
-templatePath {0}/profileTemplates/managed/ \
-dmgrAdminUserName {1} -dmgrAdminPassword {2} \
-profileRoot {3} -profileName {4} -dmgrHost {5}".format(module.params['path'],
module.params['admin_user'], module.params['admin... | [
"def create_network_profile(self, context, network_profile, fields=None):\n np = network_profile[\"network_profile\"]\n self._validate_network_profile(np)\n with context.session.begin(subtransactions=True):\n net_profile = self._add_network_profile(db_session=context.session,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that will back up any given WAS profile. | def backup_profile(module):
backup_profile_cmd = "{0}/bin/backupConfig.sh /{1}/{2}_backup.zip \
-user {3} -password {4} -profileName {5}".format(module.params['profile_path'],
module.params['dest'], module.params['profile'],
module.params['admin_user'], module.params['admin_password'],
mo... | [
"def backup_quality_profile(self, language, qualityProfile, organization):",
"def backup_profile(self, local_dir, serial=None):\n logger.info('Backing up profile...')\n # Backup Wifi\n wifi_dir = os.path.join(local_dir, self._LOCAL_DIR_WIFI)\n wifi_file = os.path.join(local_dir, self._... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that will restore a profile backup archive. | def restore_profile(module):
restore_profile_cmd = "{0}/bin/restoreConfig.sh /{1}/{2}_backup.zip \
-user {3} -password {4} -profileName {5}".format(module.params['profile_path'],
module.params['dest'],module.params['profile'], module.params['admin_user'],
module.params['admin_password'], module.params... | [
"def restore_quality_profile(self, backup, organization):",
"def restore():\n backup_dir = f'{args.database}/backups/{datetime.strftime(backups[args.restore - 1], \"%d-%b-%Y_%H-%M-%S\")}'\n\n if os.path.isdir(f'{args.database}/orthologs'):\n shutil.rmtree(f'{args.database}/orthologs')\n shutil.cop... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and return a new `Inventory` instance, given the validated data. | def create(self, validated_data):
return Inventory.objects.create(**validated_data) | [
"def create_inventory():\n app.logger.info(\"Request to create inventory\")\n check_content_type(\"application/json\")\n inv = Inventory()\n inv.deserialize(request.get_json())\n inv.create()\n message = inv.serialize()\n location_url = url_for(\"get_inventory_item_by_id\", inv_id=inv.id, _exte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gracefully stop the Updater and replace the current process with a new one | def stop_and_restart():
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv) | [
"def terminate_process(self, upid):",
"def kill_application(self):\r\n self._runWidget.kill_process()",
"def stop(self):\n self.on_fg = False\n self.device.execute_command(f\"am force-stop {self.package_name}\",\n shell=True) \\\n .validate(Exce... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to add a sub-command to the parser. | def add_sub_command(self):
pass | [
"def add_cmd(self, name, help=None, func=None):\n if self.subparsers is None:\n self.subparsers = self.add_subparsers(\n title=\"sub-commands\",\n help=help or 'sub-commands',\n )\n\n parser = self.subparsers.add_parser(\n name,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to handle the subcommands | def handler(self, sub_command, args):
pass | [
"def add_sub_command(self):\n pass",
"async def handle_subcommand(self, ctx: context.SlashContext, data: dict):\n if data[\"data\"][\"name\"] not in self.subcommands:\n return\n base = self.subcommands[data[\"data\"][\"name\"]]\n sub = data[\"data\"][\"options\"][0]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load artificial gather row (dict) into GatherRow class | def make_GatherRow(gather_dict=None, exclude_cols=[]):
# default contains just the essential cols
gatherD = {'query_name': 'q1',
'query_md5': 'md5',
'query_filename': 'query_fn',
'name': 'gA',
'f_unique_weighted': 0.2,
'f_unique_to_query... | [
"def row_to_readings(row: dict, sensors: dict, awesome_sensors: dict, reading_types) -> iter:\n\n # Extract metadata (dimensions) common to all data columns\n time = row.pop('time')\n uf_sensor_id = row.pop('sensor')\n sensor = sensors[uf_sensor_id]\n awesome_sensor_id = awesome_sensors[sensor['name'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make TaxResult from artificial gather row (dict) | def make_TaxResult(gather_dict=None, taxD=None, keep_full_ident=False, keep_ident_version=False, skip_idents=None, LIN=False):
gRow = make_GatherRow(gather_dict)
taxres = TaxResult(raw=gRow, keep_full_identifiers=keep_full_ident,
keep_identifier_versions=keep_ident_version, lins=LIN)
... | [
"def make_QueryTaxResults(gather_info, taxD=None, single_query=False, keep_full_ident=False, keep_ident_version=False,\n skip_idents=None, summarize=False, classify=False, classify_rank=None, c_thresh=0.1, ani_thresh=None,\n LIN=False):\n gather_results = {}\n thi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make QueryTaxResult(s) from artificial gather information, formatted as list of gather rows (dicts) | def make_QueryTaxResults(gather_info, taxD=None, single_query=False, keep_full_ident=False, keep_ident_version=False,
skip_idents=None, summarize=False, classify=False, classify_rank=None, c_thresh=0.1, ani_thresh=None,
LIN=False):
gather_results = {}
this_queryta... | [
"def make_TaxResult(gather_dict=None, taxD=None, keep_full_ident=False, keep_ident_version=False, skip_idents=None, LIN=False):\n gRow = make_GatherRow(gather_dict)\n taxres = TaxResult(raw=gRow, keep_full_identifiers=keep_full_ident,\n keep_identifier_versions=keep_ident_version, lins=L... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test format for krona with multiple queries (normalize by n_queries) | def test_format_for_krona_summarization_two_queries():
# make gather results
# make mini taxonomy
gA_tax = ("gA", "a;b")
gB_tax = ("gB", "a;c")
taxD = make_mini_taxonomy([gA_tax,gB_tax])
gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.2,'f_unique_to_query': 0.2,... | [
"def test_QueryTaxResult_build_summarized_result_2():\n # make mini taxonomy\n gA_tax = (\"gA\", \"a;b\")\n gB_tax = (\"gB\", \"a;c\")\n taxD = make_mini_taxonomy([gA_tax, gB_tax])\n # make gather results\n gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test two queries, build summarized result for each | def test_QueryTaxResult_build_summarized_result_2():
# make mini taxonomy
gA_tax = ("gA", "a;b")
gB_tax = ("gB", "a;c")
taxD = make_mini_taxonomy([gA_tax, gB_tax])
# make gather results
gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique_to_query': 0.5,'un... | [
"def summariseResult(self, test):",
"def test_format_for_krona_summarization_two_queries():\n # make gather results\n # make mini taxonomy\n gA_tax = (\"gA\", \"a;b\")\n gB_tax = (\"gB\", \"a;c\")\n taxD = make_mini_taxonomy([gA_tax,gB_tax])\n\n gather_results = [{'query_name': 'queryA', 'name'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test aggregate by lineage at rank | def test_aggregate_by_lineage_at_rank():
# make mini taxonomy
gA_tax = ("gA", "a;b")
gB_tax = ("gB", "a;c")
taxD = make_mini_taxonomy([gA_tax, gB_tax])
# make gather results
gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique_to_query': 0.4,'unique_interse... | [
"def test_aggregate_by_lineage_at_rank_not_available():\n # make mini taxonomy\n gA_tax = (\"gA\", \"a;b\")\n gB_tax = (\"gB\", \"a;c\")\n taxD = make_mini_taxonomy([gA_tax, gB_tax])\n # make gather results\n gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_uniq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test aggregate by lineage at rank | def test_aggregate_by_lineage_at_rank_not_available():
# make mini taxonomy
gA_tax = ("gA", "a;b")
gB_tax = ("gB", "a;c")
taxD = make_mini_taxonomy([gA_tax, gB_tax])
# make gather results
gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique_to_query': 0.4,'... | [
"def test_aggregate_by_lineage_at_rank():\n # make mini taxonomy\n gA_tax = (\"gA\", \"a;b\")\n gB_tax = (\"gB\", \"a;c\")\n taxD = make_mini_taxonomy([gA_tax, gB_tax])\n # make gather results\n gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique_to_query': ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test two queries, aggregate by lineage at rank by query | def test_aggregate_by_lineage_at_rank_by_query():
# make mini taxonomy
gA_tax = ("gA", "a;b")
gB_tax = ("gB", "a;c")
taxD = make_mini_taxonomy([gA_tax, gB_tax])
# make gather results
gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.2,'f_unique_to_query': 0.2,'uniqu... | [
"def test_aggregate_by_lineage_at_rank():\n # make mini taxonomy\n gA_tax = (\"gA\", \"a;b\")\n gB_tax = (\"gB\", \"a;c\")\n taxD = make_mini_taxonomy([gA_tax, gB_tax])\n # make gather results\n gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique_to_query': ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the raster nodata value. | def nodata(self):
return self._nodata | [
"def nodata_val():\n with rasterio.open(filenames[0]) as src:\n return src.nodata",
"def nodata_value(self):\n nodata_exists = c_int()\n value = capi.get_band_nodata_value(self.ptr, nodata_exists)\n return value if nodata_exists else None",
"def nodata_value(self):\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the raster WKT origin. | def wkt_origin(self):
return self._wkt_origin | [
"def RasterXOrigin(self):\n return self._ImageShape__origin[0]",
"def get_origin(self):\n x, y = self._origin.xy\n return (-x, -y)",
"def getoriginx(self):\n return self.origin[0]",
"def origin(self):\n return self.bounds.origin()",
"def GetOrigin(self) -> \"itkPointD3 con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dictionary of raster primitives. | def primitives(self):
return self._primitive_rasters.copy() | [
"def get_raster_list(self):\n return self.raw_raster_list",
"def _GetSurfaces(self):\n Surfaces = {}\n \n for node in self.svg.iter(): \n if node.get(inkex.addNS(\"Type\",\"TimeAnalysis\")) == \"Surface\":\n a_surface = self._ParseSurface(node)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collects data from primitives. | def _collect_data(self, to_collect):
# print(self.h, "collecting")
results = {}
for primitive in self._primitive_functions.keys():
results[primitive] = self._primitive_functions[primitive](to_collect[primitive])
return results | [
"def addPrimitive(primitive):",
"def parse_primitive(self) :\n return self.parse_one_of(\n self.parse_string_value,\n self.parse_integer_value,\n self.parse_float_value\n )",
"def __init__(self, data):\n if type(data) is not int and type(data) is not float a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get type of the monolith member's response | def type(self):
try:
if self and self._typestring in self.resp.dict:
return self.resp.dict[self._typestring]
# Added for object type
elif self and "type" in self.resp.dict:
return self.resp.dict["type"]
except (AttributeError, Va... | [
"def response_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"response_type\")",
"def response_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"response_type\")",
"def MemberType(self) -> _n_5_t_10:",
"def get_response_type(client):\n return client.service_contex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the entire response of the monolith member | def resp(self):
return self._resp | [
"def get_all_member(requests):\n get_member_details = getMemberDetails()\n\n # print(get_member_details)\n\n final_resp = {\"ok\":True,'members':[]}\n final_resp = iterate(get_member_details, 0, (len(get_member_details) - 1), final_resp)\n print(json.dumps(final_resp))\n return JsonResponse(final_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get patches for the monolith member | def patches(self):
return self._patches | [
"def patch_map(self):\n return self.definition.patches",
"def patches(self):\n return multiplied(*[p.selection() for p in self._patches])",
"def get_patches(self):\n patches = []\n\n # 1. Insert count variable\n patches.extend(self._get_count_var())\n # 2. Insert specia... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set patches for the monolith member | def patches(self, val):
self._patches = val | [
"def set_patches(patches, pixels, patch_centers, offset, offset_index):\n if pixels.ndim != 3:\n raise ValueError(\n \"Only 2D images are supported but \" \"found {}\".format(pixels.shape)\n )\n\n patch_shape = patches.shape[-2:]\n # the [L]ow offset is the floor of half the patch ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the dictionary of the monolith member's response | def dict(self):
return self._resp.dict | [
"def dict_res(self):\n self._check_req()\n res_in_dict = json.loads(\"\".join(map(chr, self.response)))\n return res_in_dict",
"def _get_response_as_dict(self, response):\n\n response_dict = response.as_dict()\n response_dict.update({\n key: self.event[key]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the etag of the response | def etag(self):
return self.defetag if not self.resp else self.resp.getheader("etag") | [
"def etag(self):\n return cache.get(self.ETAG_KEY)",
"def getetag(self):\n return DAVElement.getetag (self.etag)",
"def etag(obj):\n etag = obj['ETag']\n if etag[0]=='\"':\n return etag[1:-1]\n return etag",
"def get_etag_elem(self, strict=True):\n return self.get_child_te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the directory_load flag | def directory_load(self, dir_load):
self._directory_load = dir_load | [
"def HandleLoadDirectory(self):\n fn = str(pg.QtGui.QFileDialog.getExistingDirectory(\n caption=\"Load from a directory\",\n directory=\"\"))\n if fn == '':\n return\n # load in all the files\n toLoad = PxpLoader.LoadPxpFilesFromDirectory(fn)\n # g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return monolith version type | def type(self):
return "Monolith.1.0.0" | [
"def type_version(self) -> str:\n return self._type_version",
"def UnderlyingSystemType(self) -> _n_2_t_4:",
"def python_type(self):",
"def RuntimeType(self) -> _n_0_t_0:",
"def type_version_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"type_version_id\")",
"def deter... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The URLs visited by the monolith. | def visited_urls(self):
return list(set(self._visited_urls) | set(self.paths.keys())) | [
"def visited_urls(self) -> Set:\n return self._visited_urls",
"def to_visit_urls(self) -> Any:\n return self._to_visit_urls",
"def return_urls(self):\n return self._return_urls",
"def getVisitedUrlCount(self):\n return len(self.visted)",
"def getUrlsCounter(self):\r\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the current client | def client(self, curr_client):
self._client = curr_client | [
"def set_client(self,client_data):\n from datetime import datetime\n client_data['updated_at'] = str(datetime.now())\n ret = self.clients.update({'name': client_data['name']}, client_data, True)",
"def set_default_client(cls, client):\n\n if not isinstance(client, cls):\n ra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Walks the entire data model and caches all responses or loads an individual path into the monolith. Supports both threaded and sequential crawling. | def load(
self,
path=None,
includelogs=False,
init=False,
crawl=True,
loadtype="href",
loadcomplete=False,
path_refresh=False,
json_out=False,
):
if init:
if LOGGER.getEffectiveLevel() == 40 and not json_out:
... | [
"def doPathWalk(self):\r\n self.path_dic = {0: 1} ### first step is the initial state before we've done anything\r\n self.end_point_dic = {} # initializing the dict that keeps track of all endpoints and their probabilities\r\n while len(self.path_dic): # ## the dict is used to keep track ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mark the paths to be modified which are connected to the current path. When calling this function you only need to include `opath`. | def markmodified(self, opath, path=None, modpaths=None):
modpaths = set() if modpaths is None else modpaths
path = path if path else opath
if not path:
return
modpaths.update(self.ctree[path] if path in self.ctree else set())
self.paths[path].modified = True
... | [
"def setInHoveredVertexPath(self, inPath: bool) -> None:\n ...",
"def setInFocusedVertexPath(self, inPath: bool) -> None:\n ...",
"def checkmodified(self, opath, path=None, modpaths=None):\r\n # return [paths for paths in self.ctree[path] if self.paths[paths].modified]\r\n modpaths =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the path or its children are modified. When calling this function you only need to include `opath`. | def checkmodified(self, opath, path=None, modpaths=None):
# return [paths for paths in self.ctree[path] if self.paths[paths].modified]
modpaths = set() if modpaths is None else modpaths
path = path if path else opath
newpaths = set()
if not path:
return
... | [
"def markmodified(self, opath, path=None, modpaths=None):\r\n modpaths = set() if modpaths is None else modpaths\r\n path = path if path else opath\r\n if not path:\r\n return\r\n modpaths.update(self.ctree[path] if path in self.ctree else set())\r\n self.paths[path].mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Populate the collection types depending on the resource directory. | def _populatecollections(self):
if not self._resourcedir in self.paths:
return
self.colltypes = defaultdict(set)
alltypes = []
colls = []
for item in self.paths[self._resourcedir].dict["Instances"]:
# Fix for incorrect RDir instances.
i... | [
"def initialize_collections(self):\n COLLECTIONS = self.getCollectionsNames()\n\n try:\n collections_dict = {}\n for collection in COLLECTIONS:\n collections_dict[collection] = self.build_collection(collection)\n return collections_dict\n except E... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deserialize Metrica Sports event data JSON format into an `EventDataset`. | def deserialize(
self, inputs: Dict[str, Readable], options: Dict = None
) -> EventDataset:
self.__validate_inputs(inputs)
if not options:
options = {}
with performance_logging("load data", logger=logger):
raw_events = json.load(inputs["event_data"])
... | [
"def deserialize(\n self, inputs: Dict[str, Readable], options: Dict = None\n ) -> EventDataset:\n self.__validate_inputs(inputs)\n if not options:\n options = {}\n\n with performance_logging(\"load data\", logger=logger):\n f7_root = objectify.fromstring(inputs[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Permutes the elements of two lists, 'a' and 'b', creating a list of the lists of possible combinations. | def permut2lists(a,b):
solucao = []
for i,j in itertools.product(a,b):
solucao.append([i,j])
return solucao | [
"def unique_comb_of_two_lists(A, B):\n res = []\n for p in permutations(A, len(B)):\n zipped = zip(p, B)\n res.append(list(zipped))\n return res",
"def list_union(a, b):\n c = list(copy(a))\n for item in b:\n if item not in a:\n c.append(item)\n return c",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DESData(desfilename=None) Read in DES catalog data. | def __init__(self, desfilename=None):
Dataset.__init__(self, desfilename, "DESData", '')
self.readin() | [
"def readData(self, det):\n f = open(self.file, \"rb\")\n fortran.skip(f) # Skip header\n for _ in range(2 * det):\n fortran.skip(f) # Detector Header & Data\n fortran.skip(f) # Detector Header\n data = fortran.read(f)\n f.close()\n return data",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
readin() Read in DES catalog data from FITS or .npy files. | def readin(self):
if self.filename.endswith('.fits'):
# Assumes Science Verification data
self.read_SV_fits()
elif self.filename.endswith('.npz'):
# Assumes DES Y3 Gold data
self.read_Y3_2_2_npz()
else:
print('Unrecognized file type: ' + self.filename) | [
"def __init__(self, desfilename=None):\n\n Dataset.__init__(self, desfilename, \"DESData\", '')\n \n self.readin()",
"def read(num,ext=0,bias=True,verbose=False, formstr=None) :\n\n if type(num) == str :\n file=num\n else :\n if formstr is None : formstr=det.formstr\n file=glob... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
plot_item(self, m, ind, x, r, k, label, U, rerr, feature_weights) Borrowed from UCIDataset. Plot selection m (index ind, data in x) and its reconstruction r, with k and label to annotate the plot. U and rerr are ignored here. Could use them to plot a projection into the first two PCs' space (see dataset_libs.py). If... | def plot_item(self, m, ind, x, r, k, label, U, rerr, feature_weights):
if len(x) == 0 or len(r) == 0:
print("Error: No data in x and/or r.")
return
# Select the features to plot
if len(feature_weights) > 0:
goodfeat = [f for f in range(len(feature_weights)) \
... | [
"def Item(self) -> EPlotAttribute:",
"def plot(self, outfile='sparsegrid_plot.png', labels=None, highlight=None):\n assert plt_loaded, 'Matplotlib not installed'\n assert self.dim == 2 or self.dim == 3, 'Plotting only implemented in 2d/3d'\n fig = plt.figure(figsize=(10, 5))\n if self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the cache timestamp.txt log with a new timestamp | def update_timestamp(cache_path: str) -> None:
timestamp = datetime.datetime.now()
file = open(cache_path + "timestamp.txt", "a")
file.write("\n{}".format(timestamp))
file.close() | [
"def update_log_file(filename):\n try:\n log_file = open(filename,'r')\n new_cotent = \"\"\n lines = log_fil.readlines()\n for line in lines:\n try:\n line.strip()\n current_time = datetime.datetime.now()\n infor = line.split()\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets self._locked to True, preventing cleanup. | def lock(self) -> None:
self._locked = True | [
"def lock(self):\n self.locked = True",
"def release(self):\n if self._lock.locked():\n self._lock.release()",
"def release(self):\n #print \"RELEASING LOCK\"\n self.locked = False\n if self.timer:\n self.timer.cancel()",
"def attempt_delete(self):\n if se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates cache directory with self.id (private). | def _generate_directory(self) -> None:
if os.path.isdir(self._base_dir):
raise WorkerCacheError(
message="directory {} already exists. Check __del__ and self.id methods".format(
self._base_dir
)
)
os.makedirs(self._base_dir)
... | [
"def create_cache_dir(self):\n if not os.path.isdir(self.dir):\n # Make the dir\n os.makedirs(self.dir)",
"def _create_cache_directory_for_id(key, id_):\n cache_dir = os.path.join(\n _create_cache_directory(key), str(id_)\n )\n if os.path.exists(cache_dir) and os.path.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fires when self is deleted, deletes the directory. | def __del__(self):
self._delete_directory() | [
"def delete(self):\n if os.path.isdir(self.path):\n shutil.rmtree(self.path)\n else:\n os.remove(self.path)",
"def _delete_working_dir(self):\n print(\"delete\")\n if os.path.exists(self.path):\n shutil.rmtree(self.path)",
"def cleanup(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for a word within the Trie. If the word is not found, return None, else return the containing TrieNode | def search(self, word):
node = self
for c in word:
if c not in node.children:
return None
else:
node = node.children[c]
return node.word | [
"def get_node(self, word: str) -> t.Optional[TrieNode]:\n current_node = self.trie.root\n current_word = word\n while True:\n # if we emptied the word, it means we found it and we know the node\n if len(current_word) == 0:\n return current_node\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for a node within the Trie and return its children. | def getChildren(self, word):
node = self
for c in word:
if c not in node.children:
return None
else:
node = node.children[c]
if len(node.children) > 0:
return node.children
return None | [
"def searchNode(self, value, child=None):\n if not value: assert(Exception(\"Cannot search with a empty `value`\"))\n child = child if child is not None else self\n if child.name == value:\n return child\n else:\n children = child.children\n if len(childr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a word starting from location (sR,sC) and moving in direction specified by dR and dC. | def addWord(self, word, sR, sC, dR, dC):
cR = sR
cC = sC
# Check if we're going out of bounds
if ((cR + (len(word) * dR)) < 0 or
(cC + (len(word) * dC)) < 0 or
(cR + (len(word) * dR)) > self.rows or
(cC + (len(word) * dC)) > self.cols):
return... | [
"def addword(T, w):\n \n #FIXME\n pass",
"def add_word(self,word,d):\n w=word.lower() \n # if w not in stop_words:\n # ws=stemmer.stem(w,0,len(w)-1)\n ws = w\n d.setdefault(ws,0)\n d[ws] += 1",
"def add_word(self, player, name, word, coords): # word: 'CAT', coords: [(7, 7), (7,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a word will fit starting from location (sR,sC) and moving in direction specified by dR and dC. | def checkWord(self, word, sR, sC, dR, dC):
cR = sR
cC = sC
# Check if we're going out of bounds
if ((cR + (len(word) * dR)) < 0 or
(cC + (len(word) * dC)) < 0 or
(cR + (len(word) * dR)) > self.rows or
(cC + (len(word) * dC)) > self.cols):
retu... | [
"def fits(self, word, r, c, direction):\n\n #create a check to see if the string is the length of the word\n check = 0\n\n #check to see if the square is labeled\n if self._grid[r][c].getNumber() > 0:\n\n #call the availableSpace method to see if a space is available the\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build graph and assign device. | def build_graph_and_assign_device(self):
self._agents = {}
self._init_agent_ops = []
self._device_name = {}
for i, worker in enumerate(self._worker_names):
hparam = self._hparams[worker]
if self._devices:
device = '/gpu:' + str(i % len(self._devices))
else:
device = '/c... | [
"def build_graph(self):\r\n self._create_placeholders()\r\n self._create_network()\r\n self._create_loss()\r\n self._create_optimizer()\r\n self._create_summaries()\r\n self._show_current_model()",
"def _build_graph(self):\n self.op_size = len(self._ops)\n o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Log some stats if train_step is a multiple of log_interval. | def _maybe_log_train(self, train_step, total_loss, agent_name):
if train_step % self._log_interval == 0:
logging.info('agent %s', agent_name)
logging.info('step = %d, loss = %f', train_step, total_loss.loss) | [
"def hook_post_train(self) -> None:\n self.logger.info(f\"In total, training {self.state.current_epoch} epochs took \"\n f\"{self.state.time_total:.3f}s ({self.state.time_total - self.state.time_val:.3f}s \"\n f\"train / {self.state.time_val:.3f}s val)\")",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create pypolicy and train op. | def create_pypolicy_and_train_op(self):
self._collect_py_policies = {}
self._select_py_policies = {}
self._rb_iterator = {}
for i in range(len(self._worker_names)):
worker = self._worker_names[i]
agent = self._agents[worker]
device = self._device_name[worker]
with tf.device('/cpu... | [
"def build_policy_network_op(self, scope = \"policy_network\"):\n #######################################################\n ######### YOUR CODE HERE - 8-12 lines. ############\n\n # logprob: log π_θ(a_t|s_t)\n if self.discrete:\n action_logits = build_mlp(self.observation_placeholder,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update rb metric checkpointer. | def update_rb_metric_checkpointer(
self, use_common=False): # iteration_metrics=self._iteration_metric,
logging.info('updating rb and metric checkpointer with common=%s',
use_common)
self._metric_checkpointer = {}
self._rb_checkpointer = {}
# all_iterable={('behavior_metrics_'+wo... | [
"def db_update_metrics():\n db_put_metrics(get_metric_list())",
"def _update_trainer_metrics(self, metric_name: str, new_value: torch.tensor) -> None:\n self._trainer_metrics[metric_name] = self._trainer_metrics.get(metric_name, Average())\n self._trainer_metrics[metric_name](new_value.detach().c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update train bandit checkpointer. | def update_train_bandit_checkpointer(self,
update_bandit=True,
use_common=False):
self._train_checkpointer = {}
self._bandit_checkpointer = {}
logging.info('updating train and bandit checkpointer with common=%s',
... | [
"def train(self) -> None:\r\n\r\n self.training = True",
"def update(self):\n # ic()\n # self.update_scans()\n self.update_data()",
"def train(self):\n self.train_state = True",
"def update_rb_metric_checkpointer(\n self, use_common=False): # iteration_metrics=self._iterat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect initial experience before training begins. | def _initial_collect(self):
logging.info('Collecting initial experience...')
time_step_spec = ts.time_step_spec(self._env.observation_spec())
random_policy = random_py_policy.RandomPyPolicy(time_step_spec,
self._env.action_spec())
time_step = self._env... | [
"def before_epoch(self):\n self.model.train()",
"def _train(self):\n self._model.learn(total_timesteps=self._num_timesteps)",
"def prepare_data(self) -> None:\n self.source = ExperienceSource(self.env, self.agent)\n self.buffer = ReplayBuffer(self.replay_size)\n self.populate(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Record summaries if env_step is a multiple of summary_interval. | def _maybe_record_behavior_summaries(self, env_step, worker_name):
if env_step % self._summary_interval == 0:
for metric in self._behavior_metrics[worker_name]:
add_summary(self._train_file_writers[worker_name],
'Metrics/' + metric.name, metric.result(), env_step) | [
"def on_step_end(self, episode_step, logs):\n self.step += 1\n self.loss.append(logs.get(\"metrics\")[0])\n\n if not self.step % self.interval:\n y_pred = make_predictions(self.model.target_model, self.X_val)\n stats = calculate_metrics(self.y_val, y_pred)\n\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests on all test data points for a bias c, and training input K_inv, gamma_train (trained with l) | def test_proc(train,test, test_r, K_inv, gamma_train, perm_matrices, c,T):
nr_test_queries = test_r.shape[0]
shape_1 = train.shape
AP = 0
j = 0
for i in range(nr_test_queries):
j+= 1
x = np.expand_dims(test[i, :, :], axis=0)
shape_x = x.shape
x = np.reshape(x, (shap... | [
"def run(X_train, y_train, X_test, y_test, _k=[1]):\n # Compute distances:\n dists = mlBasics.compute_euclidean_distances(X_train, X_test)\n\n print \"Distances computed\"\n\n # For all k,\n for k in _k:\n\n # Predict labels\n y_test_pred = mlBasics.predict_labels(dists, y_train, k=k)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a page with its anagrams. Returns whether changes were made. | def update_page(title: str, alphagram: str) -> bool:
page = pywikibot.Page(SITE, title)
create_diff(page.text, page)
anagrams_to_add = get_anagrams(title, alphagram)
new_content, added_anagrams = add_anagrams(page.text, anagrams_to_add, alphagram)
new_content = re.sub("\n{3,}", "\n\n", new_con... | [
"def updatePage(self, page_info):\n pass",
"def update(self, kwargs):\n title = kwargs.get(\"page\")\n if not title:\n return\n\n title = title.replace(\"_\", \" \").decode(\"utf8\")\n query = \"SELECT page_id, page_modify_oldid FROM page WHERE page_title = ?\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the fs_type of this V1ScaleIOVolumeSource. | def fs_type(self, fs_type):
self._fs_type = fs_type | [
"def f_type(self, f_type):\n \n self._f_type = f_type",
"def _get_fstype_from_parser(self, fstype=None):\n if not fstype:\n if self.index in self.disk.parser.fstypes:\n fstype = self.disk.parser.fstypes[self.index]\n elif '*' in self.disk.parser.fstypes:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the gateway of this V1ScaleIOVolumeSource. | def gateway(self, gateway):
if self.local_vars_configuration.client_side_validation and gateway is None: # noqa: E501
raise ValueError("Invalid value for `gateway`, must not be `None`") # noqa: E501
self._gateway = gateway | [
"def set_default_gateway(self, gateway):\n self.update(default_gateway=gateway)",
"def host_gateway(self, value):\n\n self._host_gateway.set(value)",
"def set_gateway_class(gateway_class):\n GatewayProvider._gateway_class = gateway_class",
"def _config_gateway(self, network):\n gat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the protection_domain of this V1ScaleIOVolumeSource. | def protection_domain(self, protection_domain):
self._protection_domain = protection_domain | [
"def setDomain(self, domain):\n self[Header.PARAM_DOMAIN] = domain",
"def domain_path(self, domain_path):\n\n self._domain_path = domain_path",
"def domain(self, domain):\n\t\tself.domain = domain",
"def _set_wrapper_domain_param(self, domain_param: dict):\n # Cast to integer for consiste... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the read_only of this V1ScaleIOVolumeSource. | def read_only(self, read_only):
self._read_only = read_only | [
"def setReadOnly(self, readOnly):\n self.listener.setReadOnly(readOnly)",
"def readonly(self, readonly):\n \n self._readonly = readonly",
"def read_only(self):\n\n self._read_only = True",
"def set_read_only(self, network_id, value):\n return self.set_network_system_properti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the secret_ref of this V1ScaleIOVolumeSource. | def secret_ref(self, secret_ref):
if self.local_vars_configuration.client_side_validation and secret_ref is None: # noqa: E501
raise ValueError("Invalid value for `secret_ref`, must not be `None`") # noqa: E501
self._secret_ref = secret_ref | [
"def secret_ref(self) -> Optional[pulumi.Input['ThanosRulerSpecVolumesStorageosSecretRefArgs']]:\n return pulumi.get(self, \"secret_ref\")",
"def secret_ref(self) -> Optional[pulumi.Input['PrometheusSpecVolumesStorageosSecretRefArgs']]:\n return pulumi.get(self, \"secret_ref\")",
"def storage_secr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the ssl_enabled of this V1ScaleIOVolumeSource. | def ssl_enabled(self, ssl_enabled):
self._ssl_enabled = ssl_enabled | [
"def enable_ssl(self, value):\n self._set_property('enable_ssl', value)",
"def configSSL(self, secport=636, secargs=None):\n return self.config.enable_ssl(secport, secargs)",
"def enable_ssl(self):\n # type: () -> bool\n return self._get_property('enable_ssl')",
"def enable_https_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the storage_mode of this V1ScaleIOVolumeSource. | def storage_mode(self, storage_mode):
self._storage_mode = storage_mode | [
"def _SwitchStorageMode(self):\n operation = 'Switch ATFA device to Storage Mode'\n self._SendOperationStartEvent(operation)\n self.PauseRefresh()\n\n try:\n self.atft_manager.SwitchATFAStorage()\n except DeviceNotFoundException as e:\n e.SetMsg('No Available ATFA!')\n self._HandleExce... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the storage_pool of this V1ScaleIOVolumeSource. | def storage_pool(self, storage_pool):
self._storage_pool = storage_pool | [
"def set_pool(self, value):\n self.gui.spn_pool.setValue(value)",
"def pool_volume(self, pool_volume):\n if pool_volume is None:\n raise ValueError(\"Invalid value for `pool_volume`, must not be `None`\") # noqa: E501\n\n self._pool_volume = pool_volume",
"def storagePoolDefineX... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the volume_name of this V1ScaleIOVolumeSource. | def volume_name(self, volume_name):
self._volume_name = volume_name | [
"def volume_rename(self, volume, new_volume_name):\n return self.request( \"volume-rename\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],\n }, {\n } )",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fact Manager. This is initialized lazily to increase overall startup speed. | def fact_manager(self) -> FactManager:
if not self._fact_manager:
# Instantiate Global Fact Manager
self._fact_manager = FactManager()
return self._fact_manager | [
"def __init__(self):\n\n self.clusterTableManager = ClusterTableManager()\n self.docManager = DocManager()\n self.processedClusterStore = ProcessedClusterStore()",
"def __init_manager(self):\n try:\n self._db_info_cache = pd.read_hdf(\n self._hdf5_filepath, DA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test ChunkEvaluator with name=chunk | def test_ChunkEvaluator1():
# Initialize the chunk-level evaluation manager.
metric = fluid.metrics.ChunkEvaluator(name="chunk")
# Assume the model predicts 10 chunks, of which 8 are correct, and the ground truth has 9 chunks.
num_infer_chunks = 10
num_label_chunks = 9
num_correct_chunks = 8
metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks)
numpy_... | [
"def test_chunker(self):\n chunker = StringChunker(Protocol.sieve_function)\n\n for sample in self.valid_samples:\n self.assert_chunker_sample(chunker, sample)\n self.assert_chunker_sample_with_noise(chunker, sample)\n self.assert_chunker_fragmented_sample(chunker, sam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
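
The rows above are triplet records for contrastive retrieval training: each pairs a natural-language query with a positive code document and a list of hard negatives, and each row's metadata declares a single triplet objective over the (query, document, negatives) columns. Below is a minimal sketch of consuming such rows, assuming the table is exported as JSON Lines with these exact column names; the path `train.jsonl` is a hypothetical placeholder.

```python
import json

def iter_triplets(path):
    """Yield (query, positive, negatives) triplets from a JSONL export of this table."""
    with open(path) as fh:
        for line in fh:
            row = json.loads(line)
            # The "objective" metadata names the columns forming each triplet,
            # e.g. [["query", "document", "negatives"]] in every row above.
            for q_col, pos_col, neg_col in row["metadata"]["objective"]["triplet"]:
                yield row[q_col], row[pos_col], row[neg_col]

# Example usage: inspect the first triplet (the file path is an assumption).
for query, positive, negatives in iter_triplets("train.jsonl"):
    print(query[:60], "->", len(negatives), "hard negatives")
    break
```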