query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (list, lengths 19 to 20) | metadata (dict)
|---|---|---|---|
Returns the Guide data used by the Rig Component to define the layout of the final rig. | def getRigBuildData(self):
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data["ctrlSize"] = self.ctrlSizeInputAttr.getValue()
data["ctrlXfo"] = self.mainCtrl.xfo
return data | [
"def getRigBuildData(self):\n\n data = super(InsectLegComponentGuide, self).getRigBuildData()\n\n numJoints = self.numJoints.getValue()\n\n # Calculate FW\n toFirst = self.jointCtrls[0].xfo.tr.subtract(self.jointCtrls[1].xfo.tr).unit()\n toTip = self.jointCtrls[0].xfo.tr.subtract(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enables introspection of the class prior to construction to determine if it is a guide component. | def getComponentType(cls):
return 'Guide' | [
"def if_guides_exist(self):\n pass",
"def get_guide_type(guide):\n # Maintained by naming convention in the Blender files. Sub-optimal.\n try:\n return guide.name[guide.name.rindex(\".\") + 1:]\n except:\n return None",
"def isClassAdvisor(ob):\n return isinstance(ob,FunctionTyp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the corresponding rig component class for this guide component class | def getRigComponentClass(cls):
return SimpleControlComponentRig | [
"def getRigComponentClass(cls):\n\n return InsectLegComponentRig",
"def getRigComponentClass(cls):\n\n return FabriceSpineRig",
"def getComponentType(cls):\n\n return 'Guide'",
"def get_component(self, cls):\n components = self.get_components(cls)\n return components[0] if l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load csv file to database. Add `year` column | def from_csv_to_database():
for year, path in FileNamePath.items():
# load csv files
with open(path, encoding='cp1251') as dataset:
print(f"Download {year} data")
get_curr_data(dataset, year) | [
"def import_data(self, year):\n \n #Take the raw csv file for each month, process it based on parameters, and append it to the dataframe to be exported to the db.\n\n import_cols = ['FL_DATE', 'UNIQUE_CARRIER', 'OP_UNIQUE_CARRIER', 'AIRLINE_ID', 'OP_CARRIER_AIRLINE_ID', \n 'TAIL_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save csv file with given header and rows into output folder | def to_csv(header, rows):
with open('result.csv', 'w') as result:
result_writer = csv.writer(result, delimiter=';')
result_writer.writerow(header)
result_writer.writerows(rows) | [
"def write_csv(header, table_data, output_file):\r\n with open(output_file, \"a+\", newline=\"\") as file:\r\n writer = csv.writer(file)\r\n if not os.path.exists(output_file) or os.path.getsize(output_file) == 0:\r\n writer.writerow(header)\r\n writer.writerows(table_data)\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
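A note on the `to_csv` document above: because the file is opened without `newline=""`, the `csv` module can emit blank lines between rows on Windows. A minimal hardened sketch (the `path` parameter is a hypothetical addition for illustration):

```python
import csv

def to_csv(header, rows, path="result.csv"):
    # newline="" lets the csv module control line endings itself,
    # avoiding blank rows on Windows (per the csv module docs).
    with open(path, "w", newline="") as result:
        result_writer = csv.writer(result, delimiter=";")
        result_writer.writerow(header)
        result_writer.writerows(rows)
```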
Return age and the averages of size and intensity. | def calculate(data, data_top):
size, intensity, age = np.array([data["Size"]]), np.array([data["Intensity"]]), data_top.iat[1,0]
size_avg, intensity_avg = np.average(size), np.average(intensity)
return size_avg, intensity_avg, age | [
"def get_average_age(self):\n return np.mean([agent.age for agent in self.agents])",
"def mean_age(self):\n return np.mean([p.age for p in self.people])",
"def get_average_age():\n i = 0\n sum = 0\n query = Dob.select(Dob.age).join(Person, on=(Dob.person_id == Person.person_id)) \n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Locates the flags in the resource. Calls the LineFinder class in order. | def getting_flags_locations(self):
print(self.flags)
self.line_finder.find_line(self.html) | [
"def find_flags(self, name, elements, start, end):\n try:\n self.logger.debug(\"[*] Finding flags in vicinity of \" + name )\n file_name = self.current_file + \".i\"\n last_tup=len(self.flag_descriptions[file_name])\n #for flags after the struct\n max_li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add url domain field to each tweet in each user data object. Url domain field contains list of domains corresponding to list of urls. | def modify_user_data(user_d_list):
for user in user_d_list:
for tweet in user['tweets']:
domains = [get_domain_of_url(url) for url in tweet['urls']]
tweet['domains'] = domains
return | [
"def setTweetUrls(self):\n self.urls = [u[\"url\"] for u in self.tweet[\"entities\"][\"urls\"]]",
"def url_domains(self, url_domains):\n self._url_domains = url_domains",
"def getTweetUrls(self):\n return self.urls",
"def update_domain():\n\n for e in Expr.search() + User.search(): e.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function takes in all paths that are represented as lists of consecutive nodes [node1, node2,...,nodeN] and converts them to paths represented as lists of consecutive relations [rel1, rel2,...,relM] if self.include_entity is false, or as lists of nodes and relations [node1, rel1, node2, rel2,...,relM, nodeN] if self.i... | def expand_paths_by_nodes(self, paths):
paths_formatted = set()
# Expand each path
for path in paths:
if len(path) < 2:
continue
expanded_paths = set()
if self.include_entity:
relations_for_each_step = [[path[0]]]
el... | [
"def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.ap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to write all paths between any two entities that are connected by the input relation to a file. Because this function goes through all paths node by node, it is also used to filter paths to save computation. | def write_and_filter_paths(self, source, target, relation, label, paths):
file_dir = os.path.join(self.save_dir, relation + "_" + str(self.maximum_length) + "_" + str(self.remaining_percentage) + "_" + str(self.random_seed) + ".txt")
with open(file_dir, "a") as fh:
fh.write(str(label) + "\t"... | [
"def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )",
"def filter_paths(self, paths):\n formatted_paths = set()\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to filter all paths and change paths represented by relation index and entity index to paths represented by relation name and entity name | def filter_paths(self, paths):
formatted_paths = set()
for path in paths:
formatted_path = []
if self.include_entity:
if len(path) == 3:
continue
formatted_path.append(self.idx_to_node[path[0]].get_name())
for rd... | [
"def write_and_filter_paths(self, source, target, relation, label, paths):\n file_dir = os.path.join(self.save_dir, relation + \"_\" + str(self.maximum_length) + \"_\" + str(self.remaining_percentage) + \"_\" + str(self.random_seed) + \".txt\")\n with open(file_dir, \"a\") as fh:\n fh.write... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a single line into an Instruction instance. | def process_line(line: str) -> Instruction:
register, op, value, _, base, check, limit = line.split()
return Instruction(register, op, int(value), base, check, int(limit)) | [
"def parse_line(line: str) -> Instruction:\n mtch = INSTRUCTION_RE.match(line)\n assert mtch is not None\n\n operation = VALUE_TO_OPERATION[mtch.group(\"operation\")]\n argument = int(mtch.group(\"argument\"))\n return Instruction(operation=operation, argument=argument)",
"def _convert_instruction(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
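The seven-way split in `process_line` implies input lines of the form `b inc 5 if a > 1` (register, operation, amount, the literal `if`, a register to read, a comparison operator, and a limit). A self-contained round trip, assuming `Instruction` is a plain namedtuple; the real type in the source may differ:

```python
from collections import namedtuple

# Field names inferred from the unpacking in process_line above.
Instruction = namedtuple("Instruction", "register op value base check limit")

def process_line(line: str) -> Instruction:
    register, op, value, _, base, check, limit = line.split()
    return Instruction(register, op, int(value), base, check, int(limit))

print(process_line("b inc 5 if a > 1"))
# Instruction(register='b', op='inc', value=5, base='a', check='>', limit=1)
```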
Convert raw data into an easy-to-use list of Instruction instances. | def process_data(data: str) -> list[Instruction]:
instructions = []
for line in data.strip().split("\n"):
instruction = process_line(line)
instructions.append(instruction)
return instructions | [
"def transform(self, data_inst):\n return data_inst",
"def test_convert_instructions(self):\n inst = Instructions()\n inst.write_metadata = (0x56, 0xff)\n self.assertEqual(instructions_from_ovs(\"write_metadata:0x56/0xff\", {}), inst)\n\n inst = Instructions()\n inst.goto... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply all instructions and return registers + the biggest value seen. | def perform_instructions(
instructions: list[Instruction],
) -> tuple[DefaultDict[str, int], int]:
registers: DefaultDict[str, int] = defaultdict(int)
biggest = 0
for instruction in instructions:
update = OPERATORS[instruction.op]
check = OPERATORS[instruction.check]
register = ... | [
"def ms_reduce(arr):\n def r(acc, curr):\n max_local, max_global = acc\n max_local = max(max_local + curr, curr)\n max_global = max(max_global, max_local)\n return max_local, max_global\n\n _, max_global = reduce(r, arr, (float('-inf'), float('-inf')))\n return max_global",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a URL to IDN notation | def _convert_to_idn(url):
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
... | [
"def _convert_to_idn(url):\r\n # this function should only be called with a unicode string\r\n # strategy: if the host cannot be encoded in ascii, then\r\n # it'll be necessary to encode it in idn form\r\n parts = list(urlparse.urlsplit(url))\r\n try:\r\n parts[1].encode('ascii')\r\n except... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
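For context on `_convert_to_idn`: Python's built-in `idna` codec performs the host re-encoding the function falls back to. A minimal standalone sketch of the same strategy (simplified; it ignores ports and credentials in the netloc):

```python
from urllib.parse import urlsplit, urlunsplit

def host_to_idn(url: str) -> str:
    parts = list(urlsplit(url))
    try:
        parts[1].encode("ascii")          # already ASCII: nothing to do
    except UnicodeEncodeError:
        parts[1] = parts[1].encode("idna").decode("ascii")
    return urlunsplit(parts)

print(host_to_idn("http://bücher.example/path"))
# http://xn--bcher-kva.example/path
```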
One epoch is a single tournament here | def one_epoch(self, tournament_id: int, epoch=0):
# TODO: tournament pre-fetcher
tournament = Tournament(tournament_id, cache=self.cache)
# Measure correlation before to see whether gradient update took effect
correlation_before = self.get_prediction_correlation(tournament)
cor... | [
"def train_one_epoch(self):\n raise NotImplementedError",
"def training(self): #every player has a training to follow",
"def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get scores for all the teams | def get_scores(self, tournament: Tournament):
self.model.eval()
# collate_fn = lambda x: collate_teams(x, tournament.max_members)
dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)
iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tou... | [
"def league_scores(self):\n date = datetime.datetime.strftime(self.date, \"%Y%m%d\")\n url = f\"{self.base_url}{self.season}-regular/scoreboard.json?fordate={date}\"\n scores = self.api_request(url)\n return scores['scoreboard']['gameScore']",
"def get_per_game_data(self, func):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return comments tree by entity or root comment | async def get_comments_tree(request):
comment_id = request.match_info.get('comment_id')
if comment_id:
        # validation was in route (\d+)
comment_id = int(comment_id)
tree = CommentsTreeDAO.create_by_parent(comment_id)
else:
entity_type = request.match_info.get('entity_type')
... | [
"def thread(comments):\r\n \r\n ret = {'root': []}\r\n for comment in comments:\r\n if not comment.parent_id:\r\n ret['root'].append(comment)\r\n else:\r\n if comment.parent_id not in ret:\r\n ret[comment.parent_id] = []\r\n ret[comment.parent_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks to see if the word is in the dictionary, then checks if the shortened forms are homophones | def word_check(word):
word1 = word[1:]
if word1 not in word_dict: return False
if not homophones (word, word1): return False
word2 = word[0] + word[2:]
if word2 not in word_dict: return False
if not homophones(word, word2): return False
return True | [
"def homophone_words(word_one, word_two, pron_dict):\n if word_one not in pron_dict or word_two not in pron_dict:\n return False\n return pron_dict[word_one] == pron_dict[word_two]",
"def isWord(word, dictionary):\n return word in dictionary",
"def is_hindi(word):\r\n\twordlist = []\r\n\twith op... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
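The `word_check` row encodes a classic word puzzle: a word passes if dropping its first letter and dropping its second letter both yield dictionary words that are homophones of the original (e.g. scent / cent / sent). A toy, self-contained version with a stub pronunciation table standing in for the original `word_dict` and `homophones` helpers:

```python
# Stub pronunciation table; the original code loads a real dictionary.
pron = {"scent": "S EH N T", "cent": "S EH N T", "sent": "S EH N T"}
word_dict = set(pron)

def homophones(a, b):
    return pron.get(a) == pron.get(b)

def word_check(word):
    word1 = word[1:]                # drop the first letter
    if word1 not in word_dict or not homophones(word, word1):
        return False
    word2 = word[0] + word[2:]      # drop the second letter
    return word2 in word_dict and homophones(word, word2)

print(word_check("scent"))  # True with this toy table
```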
Generate the positions from trace | def posns_from_trace(trace):
posns = []
for i in range((len(trace.variables)-1)//2):
var_x = trace.variables[2*i]
var_y = trace.variables[2*i+1]
car_i = int(var_x.name.split('_')[2])
xy = (var_x.value.item(), var_y.value.item())
if len(posns) <= car_i:
pos... | [
"def get_trace(self):\n x, y = self.get_pos()\n if (x == self.x_vec[-1]) and (y == self.y_vec[-1]):\n return self.x_vec, self.y_vec\n else:\n return [*self.x_vec, x], [*self.y_vec, y]",
"def __make_pair_wise_relative_positions(self) ->None:\n indexes: torch.Tensor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parts should call draw on its child parts. It should determine if a change has been made, and if so, make the change and call update. If a part has pasted outside its region, it should return True Parts should not make changes to the display until draw has been called! This is because the order parts are drawn is impor... | def draw(self, force=False):
self.display.draw(force) | [
"def draw(self, force = False):\n\t\tpass",
"def _draw(self):\r\n if self.changed or self.alwaysDirty:\r\n self.on_draw()\r\n self.changed = False\r\n return",
"def _onPaint(self, evt):\n if not self._isRealized:\n self.realize()\n if self._drawn < 2:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the coord is in Part or any of its children. May be a better idea to call the get_part_containing function instead though, which returns the lowest level Part that contains the coord (none of its children contain the coord, but the Part does) | def contains(self, coord):
# print(coord, self.position, self.size)
return (0 <= coord[0] - self.position[0] < self.size[0] and
0 <= coord[1] - self.position[1] < self.size[1]) | [
"def get_part_containing(self, coord):\n # print('in', self)\n for k, child in self.children.items():\n # print('try', k, child)\n if child.ignore:\n # print('ignore', k, child)\n continue\n if child.contains(coord):\n # pri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the lowest Part that contains the coord (a part that contains the coord where none of its children contain the coord) Assumes that self already contains coord! Please check this if you are not sure! | def get_part_containing(self, coord):
# print('in', self)
for k, child in self.children.items():
# print('try', k, child)
if child.ignore:
# print('ignore', k, child)
continue
if child.contains(coord):
# print('contained... | [
"def smallest_containing_location(self, geom, location_type_slug='neighborhoods'):\n try:\n return Location.objects.filter(location_type__slug=location_type_slug, location__intersects=geom, is_public=True).order_by('-area')[0]\n except IndexError:\n return None",
"def find_lowe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
splink score histogram diagnostic plot public API function. Computes a histogram using the provided buckets and plots the result. | def splink_score_histogram(
df_e: DataFrame,
spark: SparkSession,
buckets=None,
score_colname=None,
symmetric=True,
):
rows = _calc_probability_density(
df_e,
spark=spark,
buckets=buckets,
score_colname=score_colname,
symmetric=symmetric,
)
retur... | [
"def draw_histogram(scores):\n maximum = max(scores)\n minimum = min(scores)\n bin_numbers = np.linspace(minimum, maximum, NUMBER_OF_HIST_BINS)\n fig = plt.figure()\n fig.suptitle(\"Histogram of pair scores\", fontsize=14)\n ax = fig.add_subplot(111)\n plt.hist(scores, bin_numbers, color=\"#000... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert an ascii format PSD to XML. | def _convert_psd(self, ascii_format, ifo):
command = ["convert_psd_ascii2xml",
"--fname-psd-ascii", f"{ascii_format}",
"--conventional-postfix",
"--ifo", f"{ifo}"]
pipe = subprocess.Popen(command,
std... | [
"def to_string(self):\n xmldom = xml.dom.minidom.parseString(ET.tostring(self.pcd))\n xmlstr = xmldom.toprettyxml(indent=\" \", encoding=\"UTF-8\")\n return xmlstr",
"def to_xml(self):\n # lines = super(FileCatNoEmpty, self).cat(filepath)\n structure = super(Point, self).to_xm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct a DAG file in order to submit a production to the condor scheduler using util_RIFT_pseudo_pipe.py | def build_dag(self, user=None):
cwd = os.getcwd()
#os.chdir(self.production.event.meta['working directory'])
#os.chdir(os.path.join(self.production.event.repository.directory,
# self.category))
if self.production.event.repository:
gps_file = self... | [
"def create(\n metadata: ProjectMetadata, pipeline_name, env, target_path\n): # pylint: disable=too-many-locals\n loader = jinja2.FileSystemLoader(str(Path(__file__).parent))\n jinja_env = jinja2.Environment(autoescape=True, loader=loader, lstrip_blocks=True)\n jinja_env.filters[\"slugify\"] = slugify\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Submit a DAG file to the condor cluster (using the RIFT dag name). This is an override of the near-identical parent function submit_dag() | def submit_dag(self):
os.chdir(self.production.rundir)
os.system("cat *_local.cache > local.cache")
for psdfile in self.production.get_psds("xml"):
ifo = psdfile.split("/")[-1].split("_")[1].split(".")[0]
os.system(f"cp {psdfile} {ifo}-psd.xml.gz")
self.before_... | [
"def submit_dag(config, dag_file):\n with SUBMIT_LOCK:\n try:\n condor_dag_cmd = osp.join(get_condor_bin_dir(config),\n CONDOR_COMMAND['dag'])\n\n pipe = subprocess.Popen(args=(condor_dag_cmd, '-force', dag_file),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect all of the log files which have been produced by this production and return their contents as a dictionary. | def collect_logs(self):
logs = glob.glob(f"{self.production.rundir}/*.err") #+ glob.glob(f"{self.production.rundir}/*/logs/*")
logs += glob.glob(f"{self.production.rundir}/*.out")
messages = {}
for log in logs:
with open(log, "r") as log_f:
message = log_f.rea... | [
"def collect_logfiles(self):\n pass",
"def getAllEntries(self):\n \n log_entries_dict = collections.defaultdict(list)\n for logfile in os.listdir(self.log_folder):\n log = os.path.join(self.log_folder, logfile)\n with open(log, 'rb') as l:\n logCSVr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the sbatch file in order to combine genomics.vcf samples contained in current_batch into a single one. | def build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, scratch=False, interval=None):
name_batch1 = os.path.basename([item for item in combined_gvcf_files if "batch1" in item][0])
interval_name = ""
#there must be at least one batch so look for it, not elegant but works
if name_batch1.s... | [
"def GenotypeGVCFs():\n #creates sbatch files to merge batches of batch_size genomics vcf\n cwd = os.getcwd()\n sbatch_files = []\n if not os.path.isdir(os.path.join(cwd, \"01_CombineGVCFs\")):\n sys.exit(\"Directory 01_CombineGVCFs does not exits exists, something went wrong here.\")\n if os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs GenotypeGVCFs on all combined files produced previously (assumes folder structure) | def GenotypeGVCFs():
#creates sbatch files to merge batches of batch_size genomics vcf
cwd = os.getcwd()
sbatch_files = []
if not os.path.isdir(os.path.join(cwd, "01_CombineGVCFs")):
        sys.exit("Directory 01_CombineGVCFs does not exist, something went wrong here.")
if os.path.isdir(os.p... | [
"def genotype_gvcfs(gatk, xmx, cores,\n inputs, output,\n reference, bed_file=None):\n commands = []\n command = GENOTYPEGVCFS_TEMPLATE.format(xmx, gatk, reference, output)\n command = command + ' --variant ' + ' --variant '.join(inputs)\n if bed_file is not None:\n command = comma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a duplicate of the profile instance. | def duplicate(self):
duplicate = Profile()
for i in self.__dict__:
if type(getattr(self, i)) is dict:
setattr(duplicate, i, getattr(self, i).copy())
else:
setattr(duplicate, i, getattr(self, i))
return duplicate | [
"def copy(self):\n return Profile(\n self._api._req(self._path(batch=True),\n method='post',\n json_res=True,\n assert_status=201,\n data=self.dumps()),\n self._api._req(self._path())... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves this profile instance to an xml file using a XmlWriter. xwriter: should be a XmlWriter instance. | def save_to_xml(self, xwriter):
xwriter.WriteStartElement("Profile")
xwriter.WriteAttributeString("Name", self.Name)
xwriter.WriteStartAttribute("Version")
xwriter.WriteValue(self.Version)
xwriter.WriteEndAttribute()
for var_name in self.__dict__:
v... | [
"def save_profile(file_path, profile):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n profile.save_to_xml(writer)\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured wri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a dictionary to an xml file as nested elements. attribute_name: The name of the dictionary attribute to write. xmlwriter: The xml writer to write with. write_empty: A bool of whether to write empty values to the xml file. Default is don't write them. | def write_dict_to_xml(self, attribute_name, xmlwriter, write_empty=False):
if attribute_name in ("IllegalCharacters", "Months"):
write_empty = True
dictionary = getattr(self, attribute_name)
xmlwriter.WriteStartElement(attribute_name)
for key in dictionary:
... | [
"def writeDictToXMLFile(outfile, target, dict):\n targetStr = \"\\t\\t<Target>%s</Target>\\n\" % (escape(target),)\n for key in dict.keys():\n outfile.write('\\t<AVU>\\n')\n outfile.write(targetStr)\n outfile.write(\"\\t\\t<Attribute>%s</Attribute>\\n\" % (escape(key),) )\n outfile... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a list to an xml file in the form of <Item>value</Item><Item>value</Item> etc. attribute_name: The name of the list attribute to write. xmlwriter: The xml writer to write with. write_empty: A bool of whether to write empty values to the xml file. Default is don't write them. | def write_list_to_xml(self, attribute_name, xmlwriter, write_empty=False):
attribute_list = getattr(self, attribute_name)
xmlwriter.WriteStartElement(attribute_name)
for item in attribute_list:
if item or write_empty:
xmlwriter.WriteElementString("Item", item)
... | [
"def xml_file_writer(list_of_links):\n root = et.Element(\"links\")\n for link in list_of_links:\n et.SubElement(root, \"link\").text = link\n tree = et.ElementTree(root)\n tree.write(\"active_malwares_file.xml\", encoding='utf-8', xml_declaration=True)",
"def __listToXML( self, name, aList, in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a string to an xml file in the form of <attribute_name>string</attribute_name>. attribute_name: The name of the string attribute to write. xmlwriter: The xml writer to write with. write_empty: A bool of whether to write empty strings to the xml file. Default is write empty strings. | def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):
string = getattr(self, attribute_name)
if string or write_empty:
xmlwriter.WriteElementString(attribute_name, string) | [
"def write_string(writer, name, value):\n return writer.write_element(name, value)",
"def test_write_string():\n buf = make_buffer()\n writer = XmlWriter(buf)\n writer.write_element('value', 'myvalue')\n writer.flush()\n assert_equals(decode_buffer(buf), '<value>myvalue</value>')",
"def wr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a boolean to an xml file in the form of <attribute_name>true/false</attribute_name>. attribute_name: The name of the attribute to write. xmlwriter: The xml writer to write with. | def write_bool_to_xml(self, attribute_name, xmlwriter):
xmlwriter.WriteStartElement(attribute_name)
xmlwriter.WriteValue(getattr(self, attribute_name))
xmlwriter.WriteEndElement() | [
"def write_boolean(writer, name, value):\n return writer.write_element(name, Writer.render_boolean(value))",
"def writeAttribute(self, *args):\n if type(args[1]) == type(True): return _libsbml.XMLOutputStream_writeAttributeBool(self, *args)\n\n\n return _libsbml.XMLOutputStream_writeAttribute... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load profiles from an xml file. If no profiles are found it creates a blank profile. file_path: The absolute path to the profile file. Returns a dict of the found profiles and a list of the lastused profile(s) | def load_profiles(file_path):
profiles, lastused = load_profiles_from_file(file_path)
if len(profiles) == 0:
#Just in case
profiles["Default"] = Profile()
profiles["Default"].Name = "Default"
#Some default templates
profiles["Default"].FileTemplate = "{<series>}{... | [
"def load_profiles_from_file(file_path):\r\n profiles = {}\r\n\r\n lastused = \"\"\r\n\r\n if File.Exists(file_path):\r\n try:\r\n with StreamReader(file_path) as xmlfile:\r\n xmldoc = XmlDocument()\r\n xmldoc.Load(xmlfile)\r\n\r\n if xmldoc.Docume... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads profiles from a file. file_path: The absolute path to the xml file. Returns a dict of the profiles | def load_profiles_from_file(file_path):
profiles = {}
lastused = ""
if File.Exists(file_path):
try:
with StreamReader(file_path) as xmlfile:
xmldoc = XmlDocument()
xmldoc.Load(xmlfile)
if xmldoc.DocumentElement.Name == "Profiles":... | [
"def import_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n return profiles",
"def load_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n if len(profiles) == 0:\r\n #Just in case\r\n profiles[\"Default\"] = P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load profiles from an xml file. If no profiles are found it returns an empty dict. file_path: The absolute path to the profile file. Returns a dict of the found profiles. | def import_profiles(file_path):
profiles, lastused = load_profiles_from_file(file_path)
return profiles | [
"def load_profiles_from_file(file_path):\r\n profiles = {}\r\n\r\n lastused = \"\"\r\n\r\n if File.Exists(file_path):\r\n try:\r\n with StreamReader(file_path) as xmlfile:\r\n xmldoc = XmlDocument()\r\n xmldoc.Load(xmlfile)\r\n\r\n if xmldoc.Docume... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves the profiles to an xml file. | def save_profiles(file_path, profiles, lastused=""):
try:
xSettings = XmlWriterSettings()
xSettings.Indent = True
with XmlWriter.Create(file_path, xSettings) as writer:
writer.WriteStartElement("Profiles")
if lastused:
writer.WriteAttributeStrin... | [
"def save_profile(file_path, profile):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n profile.save_to_xml(writer)\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured wri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves a single profile to an xml file. | def save_profile(file_path, profile):
try:
xSettings = XmlWriterSettings()
xSettings.Indent = True
with XmlWriter.Create(file_path, xSettings) as writer:
profile.save_to_xml(writer)
except Exception, ex:
MessageBox.Show("An error occured writing the settings fi... | [
"def save_profiles(file_path, profiles, lastused=\"\"):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n wri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function returns the softmax derivative value for the given input | def softmax_derivative(x):
der = derivative(softmax,x,dx=1e-9)
return der | [
"def softmax(_input):\n score = np.exp(_input)\n _sum = np.sum(score)\n\n return np.divide(score, _sum)",
"def softmax(x):\n x = x - np.max(x)\n exp_x = np.exp(x)\n softmax_x = exp_x / np.sum(exp_x)\n return softmax_x",
"def softmax_gradient(softmax_result):\r\n\r\n s = softmax_result.re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
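The `softmax_derivative` query is usually answered analytically rather than with a numeric `derivative` call: for s = softmax(x), the Jacobian is diag(s) - s s^T. A sketch of that closed form in NumPy:

```python
import numpy as np

def softmax(x):
    e = np.exp(x - np.max(x))       # shift for numerical stability
    return e / e.sum()

def softmax_jacobian(x):
    s = softmax(x)
    # ds_i/dx_j = s_i * (delta_ij - s_j)
    return np.diag(s) - np.outer(s, s)

J = softmax_jacobian(np.array([1.0, 2.0, 3.0]))
print(J.shape)   # (3, 3); each row sums to ~0
```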
AppendRows(numRows=1) -> bool. Append additional rows at the end of the table. | def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__
return (self.GetNumberRows() + numRows) | [
"def appendRow(self):\n self._insertRow(self.table.rowCount())",
"def append_rows(self, rows):\n for row in rows:\n self.append_row(row)",
"def add_rows(self):\n for row in self.rows:\n self.table.add_row(row)",
"def AppendCols(self, numCols=1): # real signature unk... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeleteRows(pos=0, numRows=1) -> bool. Delete rows from the table. | def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__
if self.data is None or len(self.data) == 0:
return False
for rowNum in range(0,numRows):
self.data.remove(self.data[numRows-1-pos-rowNum])
gridView=self.GetView()
gridView.Be... | [
"def Delete(self, rows):\n query=pgQuery(self.tableSpecs.tabName, self.tableSpecs.GetCursor())\n allWhere=[]\n for row in rows:\n wh=[]\n for colname in self.tableSpecs.keyCols:\n wh.append(\"%s=%s\" % (quoteIdent(colname), quoteValue(self.rows[row][colname])))\n allWhere.append(\"(%s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AppendCols(numCols=1) -> bool. Exactly the same as AppendRows() but for columns. | def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__
return False | [
"def testAddCols(self):\n data = np.zeros(90, dtype=list(zip(['alt'], [float])))\n data['alt'] = np.arange(0, 90)\n stacker = stackers.ZenithDistStacker(altCol='alt', degrees=True)\n newcol = stacker.colsAdded[0]\n # First - are the columns added if they are not there.\n da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset all noisy layers. | def reset_noise(self):
self.advantage_hidden_layer.reset_noise()
self.advantage_layer.reset_noise()
self.value_hidden_layer.reset_noise()
self.value_layer.reset_noise() | [
"def reset(self):\n for layer in self.layers:\n layer.reset()",
"def reset(self):\n for i in range(2):\n self.noise[i].reset()",
"def reset(self):\n self.noise.reset()",
"def clear():\n _global_config.layers = []",
"def reset_noise(self):\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an undefined output folder path, we return the blank string | def test_make_output_folder_undefined_path(self):
test_object = Maic()
expected_result = ""
self.assertEqual(expected_result,
test_object.make_output_folder(output_folder=None),
"Should get back an empty string for an undefined "
... | [
"def test_make_output_folder_blank_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=\"\"),\n \"Should get back an empty string for an output \"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an empty output folder path, we return the blank string | def test_make_output_folder_blank_path(self):
test_object = Maic()
expected_result = ""
self.assertEqual(expected_result,
test_object.make_output_folder(output_folder=""),
"Should get back an empty string for an output "
... | [
"def test_make_output_folder_undefined_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=None),\n \"Should get back an empty string for an undefined \"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a complex folder path with multiple embedded slashes, check that the code tries to make the folder and returns the path with a single trailing '/' appended only if required | def test_make_output_folder_path_with_multi_slashes(self, mock_makedirs):
mock_makedirs.return_value = True
test_object = Maic()
path = '/c/o/m/p/l/e/x_p/a/t/h/'
expected_result = path
self.assertEqual(expected_result,
test_object.make_output_folder(outpu... | [
"def format_folder_path(folder_path):\n if folder_path[-1] != '/':\n folder_path += '/'\n\n return folder_path",
"def test_fix_path(self):\n\n expected = \"hello\" + PyFunceble.directory_separator + \"world\" + PyFunceble.directory_separator # pylint: disable=line-too-long\n actual = D... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that an output folder path that exists but does not end with something that looks like a timestamp gets a timestamp added | def test_make_output_folder_exists_no_timestamp(self, mock_makedirs,
mock_logger):
mock_makedirs.side_effect = [OSError, True]
test_object = Maic()
path = "my_path"
sep = os.sep
if os.sep == '\\':
# we've got a backs... | [
"def test_stamp_path_does_not_exists(self):\n if os.path.exists(self.rustc_stamp_path):\n os.unlink(self.rustc_stamp_path)\n self.assertTrue(self.build.program_out_of_date(self.rustc_stamp_path))",
"def test_make_output_folder_exists_with_timestamp_fails(self,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that an output folder path that exists and does end with something that looks like a timestamp raises an exception | def test_make_output_folder_exists_with_timestamp_fails(self,
mock_makedirs):
mock_makedirs.side_effect = [OSError]
test_object = Maic()
path = "my_path-1960-04-04--15-00"
try:
test_object.make_output_folder(output_f... | [
"def test_stamp_path_does_not_exists(self):\n if os.path.exists(self.rustc_stamp_path):\n os.unlink(self.rustc_stamp_path)\n self.assertTrue(self.build.program_out_of_date(self.rustc_stamp_path))",
"def test_make_output_folder_exists_no_timestamp(self, mock_makedirs,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to remove the line numbers from the debug output of gyp and thus reduce the extreme fragility of the stdout comparison tests. | def remove_debug_line_numbers(contents):
lines = contents.splitlines()
# split each line on ":"
lines = [l.split(":", 3) for l in lines]
# join each line back together while ignoring the
# 3rd column which is the line number
lines = [len(l) > 3 and ":".join(l[3:]) or l for l in lines]
return "\n".join(lin... | [
"def remove_curl_debug_lines(text: str) -> str:\n lines = text.split(\"\\n\")\n lines = [line for line in lines if not line.startswith(\"**\")]\n return \"\\n\".join(lines)",
"def strip_debug_commands(data):\n\n # strip block comments\n strippedCode = re.sub(re.compile('<#.*?#>', re.DOTALL), '', da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
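To make `remove_debug_line_numbers` concrete: gyp debug lines look roughly like `category:file:line:message` (the exact format is an assumption here), and the helper keeps only the text after the third colon. A small sketch, written defensively so lines with fewer than three colons pass through unchanged rather than surviving as lists:

```python
def strip_line_numbers(contents: str) -> str:
    out = []
    for fields in (line.split(":", 3) for line in contents.splitlines()):
        # keep the message column, drop category/file/line-number columns
        out.append(fields[3] if len(fields) > 3 else ":".join(fields))
    return "\n".join(out)

log = "general:input.py:512:loading build file\nplain line"
print(strip_line_numbers(log))
# loading build file
# plain line
```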
File contents matcher that ignores line numbers. | def match_modulo_line_numbers(contents_a, contents_b):
contents_a = remove_debug_line_numbers(contents_a)
contents_b = remove_debug_line_numbers(contents_b)
return TestCommon.match_exact(contents_a, contents_b) | [
"def check_contents(filename, contents, match):\n with open(filename) as handle:\n assert any(contents in line for line in handle) == match",
"def mocked_get_lines_from_file(contents):\n lines = contents.splitlines()\n return lines",
"def assertLogFileDoesntContainsLineRegex(self, expression, fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails the test if the specified built file name does not exist. | def built_file_must_exist(self, name, type=None, **kw):
return self.must_exist(self.built_file_path(name, type, **kw)) | [
"def built_file_must_not_exist(self, name, type=None, **kw):\n return self.must_not_exist(self.built_file_path(name, type, **kw))",
"def test_files_not_generated(cookies, option, file_name):\n with bake(cookies, extra_context={option: 'no'}) as result:\n assert result.project.isdir()\n assert ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails the test if the specified built file name exists. | def built_file_must_not_exist(self, name, type=None, **kw):
return self.must_not_exist(self.built_file_path(name, type, **kw)) | [
"def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))",
"def test_files_not_generated(cookies, option, file_name):\n with bake(cookies, extra_context={option: 'no'}) as result:\n assert result.project.isdir()\n assert result.e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails the test if the contents of the specified built file name do not match the specified contents. | def built_file_must_match(self, name, contents, **kw):
return self.must_match(self.built_file_path(name, **kw), contents) | [
"def built_file_must_not_match(self, name, contents, **kw):\n return self.must_not_match(self.built_file_path(name, **kw), contents)",
"def test_invalid_file(self):\n self.assertFalse(bootstrap.verify(self.bad_src, self.sums, False))",
"def built_file_must_exist(self, name, type=None, **kw):\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails the test if the contents of the specified built file name match the specified contents. | def built_file_must_not_match(self, name, contents, **kw):
return self.must_not_match(self.built_file_path(name, **kw), contents) | [
"def built_file_must_match(self, name, contents, **kw):\n return self.must_match(self.built_file_path(name, **kw), contents)",
"def check_contents(filename, contents, match):\n with open(filename) as handle:\n assert any(contents in line for line in handle) == match",
"def built_file_must_exist(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copies the test configuration from the specified source_dir (the directory in which the test script lives) to the specified dest_dir (a temporary working directory). This ignores all files and directories that begin with the string 'gyptest', and all '.svn' subdirectories. | def copy_test_configuration(self, source_dir, dest_dir):
for root, dirs, files in os.walk(source_dir):
if '.svn' in dirs:
dirs.remove('.svn')
dirs = [ d for d in dirs if not d.startswith('gyptest') ]
files = [ f for f in files if not f.startswith('gyptest') ]
for dirname in dirs:
... | [
"def _setup_build_dir(self, dest):\n\n raw_src_path = self._config.get('source_path')\n if raw_src_path is None:\n src_path = None\n else:\n src_path = self._find_file(Path(raw_src_path), 'test_src')\n if src_path is None:\n raise TestBuilderError... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the .build_tool attribute. Searches the .build_tool_list for an executable name on the user's $PATH. The first tool on the list is used as-is if nothing is found on the current $PATH. | def initialize_build_tool(self):
for build_tool in self.build_tool_list:
if not build_tool:
continue
if os.path.isabs(build_tool):
self.build_tool = build_tool
return
build_tool = self.where_is(build_tool)
if build_tool:
self.build_tool = build_tool
re... | [
"def set_default_tool(tool):\n global _DEFAULT_TOOL\n _DEFAULT_TOOL = tool",
"def build_tool(fields_xpaths):\n return XpathsToolConstructor._get_tool(fields_xpaths)",
"def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renames (relocates) the specified source (usually a directory) to the specified destination, creating the destination directory first if necessary. | def relocate(self, source, destination):
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
self.subdir(destination_dir)
os.rename(source, destination) | [
"def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode",
"def move(origin, destination):\n os.rename... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reports that a build is not up-to-date. This provides common reporting for formats that have complicated conditions for checking whether a build is up-to-date. Formats that expect exact output from the command (make) can just set stdout= when they call the run_build() method. | def report_not_up_to_date(self):
print "Build is not up-to-date:"
print self.banner('STDOUT ')
print self.stdout()
stderr = self.stderr()
if stderr:
print self.banner('STDERR ')
print stderr | [
"async def test_ignore_failed_builds(self):\n self.set_source_parameter(\"result_type\", [\"Success\"])\n self.builds.append({\"result\": \"SUCCESS\", \"timestamp\": 1553686540953})\n jenkins_json = {\n \"jobs\": [{\"name\": \"job\", \"url\": self.job_url, \"buildable\": True, \"colo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs gyp against the specified gyp_file with the specified args. | def run_gyp(self, gyp_file, *args, **kw):
    # When running gyp and comparing its output, we use a comparator
# that ignores the line numbers that gyp logs in its debug output.
if kw.pop('ignore_line_numbers', False):
kw.setdefault('match', match_modulo_line_numbers)
# TODO: --depth=. works around... | [
"def TestGyp(*args, **kw):\n format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))\n if format != 'ninja':\n raise Exception(\"unknown format %r\" % format)\n return TestGypNinja(*args, **kw)",
"def build(self, gyp_file, target=None, **kw):\n raise NotImplementedError",
"def run_python_file(pytho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs a build of the specified target against the configuration generated from the specified gyp_file. A 'target' argument of None or the special value TestGyp.DEFAULT specifies the default argument for the underlying build tool. A 'target' argument of TestGyp.ALL specifies the 'all' target (if any) of the underlying bu... | def build(self, gyp_file, target=None, **kw):
raise NotImplementedError | [
"def run_gyp(self, gyp_file, *args, **kw):\n\n # When running gyp, and comparing its output we use a comparitor\n # that ignores the line numbers that gyp logs in its debug output.\n if kw.pop('ignore_line_numbers', False):\n kw.setdefault('match', match_modulo_line_numbers)\n\n # TODO: --depth=. ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the base name of the specified file name, of the specified type. A bare=True keyword argument specifies that prefixes and suffixes shouldn't be applied. | def built_file_basename(self, name, type=None, **kw):
if not kw.get('bare'):
if type == self.EXECUTABLE:
name = name + self._exe
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
return na... | [
"def make_fullname(basename, _type=None):\n return '{}.{}'.format(basename, extensions.get(_type, None))",
"def base_name(path):\n return os.path.basename(path)",
"def fileBaseOf(self, filename, withPath=0):\n pos = filename.rfind('.')\n if pos > 0:\n filename = filename[:pos]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs an executable program built from a gyp-generated configuration. The specified name should be independent of any particular generator. Subclasses should find the output executable in the appropriate output build directory, tack on any necessary executable suffix, etc. | def run_built_executable(self, name, *args, **kw):
raise NotImplementedError | [
"def build(self, progname):\n self.run_programm(self.COMPILED[self.progtype][0], \"%s %s %s\" %\\\n (progname, self.COMPILED[self.progtype][1], COMPILED_FILENAME ))\n\n compiled_progname=COMPILED_FILENAME\n return compiled_progname",
"def gen_bin(execname):\n\n template ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert to cygwin path if we are using cygwin. | def ConvertToCygpath(path):
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path | [
"def convert_cygwin_path(path):\n\n\ttry:\n\t\twin_path = subprocess.check_output([\"cygpath\", \"-aw\", path], universal_newlines=True).strip()\n\texcept (FileNotFoundError, subprocess.CalledProcessError):\n\t\tlogger.exception(\"Call to cygpath failed.\")\n\t\traise\n\n\treturn win_path",
"def conditional_abspa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns path to MSBuild for msvs_version or latest available. Looks in the registry to find install location of MSBuild. MSBuild before v4.0 will not build c++ projects, so only use newer versions. | def FindMSBuildInstallation(msvs_version = 'auto'):
import TestWin
registry = TestWin.Registry()
msvs_to_msbuild = {
'2013': r'12.0',
'2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.
'2010': r'4.0'}
msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
if not re... | [
"def msbuild(name_or_gen_or_ver):\n ver = to_ver(name_or_gen_or_ver)\n if ver < 12: # VS2012 and below\n import winreg as wr\n msbuild = None\n msbvers = ('4.0', '3.5', '2.0')\n for v in msbvers:\n key = \"SOFTWARE\\\\Microsoft\\\\MSBuild\\\\ToolsVersions\\\\{}\\\\MS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns appropriate values for .build_tool and .uses_msbuild fields of TestGypBase for Visual Studio. We use the value specified by GYP_MSVS_VERSION. If not specified, we search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable. Failing that, we search for likely deployment paths. | def FindVisualStudioInstallation():
possible_roots = ['%s:\\Program Files%s' % (chr(drive), suffix)
for drive in range(ord('C'), ord('Z') + 1)
for suffix in ['', ' (x86)']]
possible_paths = {
'2013': r'Microsoft Visual Studio 12.0\Common7\IDE\devenv.com',
'2012': ... | [
"def _find_msbuild_tool(tool=\"msbuild.exe\", use_windows_sdk=False):\n try:\n import _winreg\n except ImportError:\n import winreg as _winreg\n\n keys_to_check = []\n if use_windows_sdk:\n sdks_root = r\"SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\"\n kits_root = r\"SOFTWAR... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run the dumpbin tool with the specified arguments, capturing and returning stdout. | def run_dumpbin(self, *dumpbin_args):
assert sys.platform in ('win32', 'cygwin')
cmd = os.environ.get('COMSPEC', 'cmd.exe')
arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']
arguments.extend(dumpbin_args)
proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)
output = proc.communica... | [
"def dump(args):\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()",
"def test_bcftools_cli_dump(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an appropriate TestGyp instance for a specified GYP format. | def TestGyp(*args, **kw):
format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))
if format != 'ninja':
raise Exception("unknown format %r" % format)
return TestGypNinja(*args, **kw) | [
"def _TestSpecFromType(self, test_type):\n if test_type == 'instrumentation':\n return self._BuildAndroidInstrumentationTestSpec()\n elif test_type == 'robo':\n return self._BuildAndroidRoboTestSpec()\n else: # It's a bug in our arg validation if we ever get here.\n raise exceptions.Invalid... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read data from file, and return RDD data | def read_data(file_path, sparkContext):
data_rdd = sparkContext \
.textFile(file_path) \
.map(eval) \
.map(lambda x: (x[0], x[1]))
return data_rdd | [
"def load_data():\n with open('../data/dataset.txt', 'r') as data_file:\n return data_file.read().split('\\n')",
"def parseData(filename):\n\treturn (sc\n\t\t\t.textFile(filename, 4, 0)\n\t\t\t.map(parseDatafileLine)\n\t\t\t.cache())",
"def _read_result_file_(filename, data):\n rows, cols = [], []\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Swap the elements of a pair tuple. | def swap((u, v)):
return (v, u) | [
"def swap((x, y)):\n return (y, x)",
"def swap(a, i, j):\n (a[i], a[j]) = (a[j], a[i])",
"def swap(t, i, j):\n t[i], t[j] = t[j], t[i]",
"def swap(permutation, transposition):\n transposed_permutation = list(permutation)\n i, j = transposition\n transposed_permutation[i], transposed_permutat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the degree for each node in the graph, return the degree result RDD | def calc_degree(graph_rdd):
all_degree = graph_rdd \
.map(swap) \
.union(graph_rdd) \
.map(lambda (x, y): (x, 1)) \
.reduceByKey(add, numPartitions=40)
return all_degree | [
"def get_out_degree_dist(rdd):",
"def degree(self):\n\n # get degree of nodes\n node_degree = self.matrix.astype(bool).sum(0).A[0]\n\n # convert degree to dictionary\n node_degree = dict([(self.index2label[x], node_degree[x]) for x in range(len(node_degree))])\n return node_degr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
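Both the `swap` and `calc_degree` rows use Python 2 tuple-parameter unpacking (`def swap((u, v))`, `lambda (x, y): ...`), which PEP 3113 removed in Python 3. Equivalent Python 3 forms, sketched without a live SparkContext:

```python
from operator import add

def swap(pair):
    u, v = pair          # explicit unpacking replaces def swap((u, v))
    return (v, u)

def calc_degree(graph_rdd):
    return (graph_rdd
            .map(swap)
            .union(graph_rdd)
            .map(lambda edge: (edge[0], 1))   # replaces lambda (x, y): (x, 1)
            .reduceByKey(add, numPartitions=40))
```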
Returns the requested Detail Placement view in full detail. | def GetDetailPlacementView(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | [
"def detail(self):\n return self._detail",
"def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of all available netloaders | def getNets(self):
return NetLoader.listNetworks() | [
"def getLoaders(self):\n return self.__loaders;",
"def get_loaders(tc):\n if not isinstance(tc, TkContext):\n raise RuntimeError(\"tc parameter must be a TkContext, but recieved %s.\" % type(tc))\n return tc.sc._jvm.org.trustedanalytics.daaltk.saveload.Loaders.getLoaders()",
"def get_availab... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the type of the net | def setType(self, type):
if not self.Loaded:
self.type = type
self.loader = NetLoader.getNetwork(type)
self.isTypeSet = True | [
"def set_network_type(self, nNetworkType):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkType', self.handle, nNetworkType)",
"def set_network_type(self,network_type):\n self.network_type = network_type\n return self",
"def network_type(self, network_type):\n\n self._network_type = network_ty... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the config of the net | def setConfig(self, cfg):
if not self.Loaded:
self.cfg = cfg
if (cfg != ""):
self.isCfgSet = NetLoader.loadConfig(self.type,cfg)
else:
self.isCfgSet = True | [
"def net_config(self, net_config):\n self._net_config = net_config",
"def netapi32_NetConfigSet(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"server\", \"reserved1\", \"component\", \"level\", \"reserved2\", \"buf\", \"reserved3\"])\n raise RuntimeError('API not implemented')\n jitter.func... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Provides direct access to the netloader | def getNet(self):
return self.loader | [
"def load_device():",
"def loadNetwork(cls):\n # parse the net using sumolib\n parsedNetwork = sumolib.net.readNet(Config.sumoNet)\n # apply parsing to the network\n Network.__applyNetwork(parsedNetwork)",
"def getInstance():\n return net()",
"def lab_network(self) -> None:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of neurons in the net | def getNeuronCount(self):
return self.loader.getNeuronCount() | [
"def get_n_neurons(self):",
"def size_in(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a single neuron from the net | def getNeuron(self, index):
return self.loader.getNeuron(index) | [
"def get_neuron(self, position):\n return self.neurons[position]",
"def get_neuron_network(self):\n return self.neuron_network()",
"def get_neuron(name, network):\n neuron = [neuron for neuron in network.neurons if type(neuron).__name__ == name]\n if not neuron:\n raise NameError(\"ne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively partition the graph G, applying the algorithm defined by partition_function depth times. | def recursive_partition(G,
partition_function,
depth,
dendogram=False,
**kwargs):
C = [set(G)]
if dendogram:
D = nx.Graph()
for _ in range(depth):
C_next = []
for c in C:
C_nex... | [
"def __call__(self, g, n_partitions):\n\n def _iterative_cutting(g, p):\n \"\"\"helper function (iterative version)\"\"\"\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
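The truncated `recursive_partition` entry above repeatedly splits every current cell with a caller-supplied partition function. A runnable sketch of that pattern (omitting the dendrogram branch), using NetworkX's Kernighan-Lin bisection as the partition function:

```python
import networkx as nx
from networkx.algorithms.community import kernighan_lin_bisection

def recursive_partition(G, partition_function, depth):
    """Split every current cell with partition_function, depth times."""
    cells = [set(G)]
    for _ in range(depth):
        next_cells = []
        for cell in cells:
            # Partition the subgraph induced by this cell; the function is
            # expected to return an iterable of node sets.
            next_cells.extend(partition_function(G.subgraph(cell)))
        cells = next_cells
    return cells

G = nx.karate_club_graph()
# kernighan_lin_bisection returns a pair of node sets, so each pass
# roughly doubles the number of cells: 1 -> 2 -> 4.
print([len(c) for c in recursive_partition(G, kernighan_lin_bisection, 2)])
```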
Tests the creation of LASCOMap using FITS. | def test_fitstoLASCO(lasco):
assert isinstance(lasco, LASCOMap) | [
"def test_fit1(self):\r\n self.test.test_map_fit()",
"def test_map_init(self):\n game_map = Map(CONFIG.MAP_NAME)\n train = Train(idx=1, line_idx=game_map.line[1].idx, position=0)\n game_map.add_train(train)\n\n self.assertTrue(game_map.okey)\n self.assertEqual(len(game_ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the is_datasource_for method of LASCOMap. Note that the header data provided as an argument can be a MetaDict object. | def test_is_datasource_for(lasco):
assert lasco.is_datasource_for(lasco.data, lasco.meta) | [
"def test_is_datasource_for(createAIAMap):\n assert createAIAMap.is_datasource_for(createAIAMap.data, createAIAMap.meta)",
"def test_is_datasource_for(eit_map):\n assert eit_map.is_datasource_for(eit_map.data, eit_map.meta)",
"def test_is_datasource_for(kcor):\n assert kcor.is_datasource_for(kcor.data,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the measurement property of the LASCOMap object. | def test_measurement(lasco):
assert lasco.measurement == "white-light" | [
"def test_measurement(createAIAMap):\n assert createAIAMap.measurement.value in [171, 193]\n # aiaimg has 171, jp2path has 193.",
"def test_measurement(eit_map):\n assert eit_map.measurement.value in [195, 171]",
"def test_unit_of_measurement(self):\n for name in self.sensor_dict:\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the observatory property of the LASCOMap object. | def test_observatory(lasco):
assert lasco.observatory == "SOHO" | [
"def test_observatory(eit_map):\n assert eit_map.observatory == \"SOHO\"",
"def test_observatory(createAIAMap):\n assert createAIAMap.observatory == \"SDO\"",
"def test_observatory(kcor):\n assert kcor.observatory == \"MLSO\"",
"def check_observatory(self):\n assert self.observatory in ALL_OBS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
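The four LASCOMap tests above all consume a `lasco` fixture built from a FITS file that is not shown here. A hedged sketch of how such a fixture is typically wired up with pytest and sunpy, using the bundled AIA sample image instead of a LASCO file (so the asserted values differ):

```python
import pytest
import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE  # downloads sample data on first use

@pytest.fixture
def aia():
    return sunpy.map.Map(AIA_171_IMAGE)

def test_observatory(aia):
    # Same shape as the LASCO tests above, but against the SDO/AIA sample map.
    assert aia.observatory == "SDO"
```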
Get a value from the query dict by key. | def get_value(self, query_dict, k):
if k in query_dict:
return query_dict[k]
return '' | [
"def get(key):\n\n return db.select_single('things', {'key': key}, None, ['key', 'value'])",
"def getSpecific(self, keyword, key):",
"def find_value(dic, key):\n return dic[key]",
"def query_value(self, key):\n args = [ 'get', self.name, key ]\n self.botio.write_args(args)",
"def get_item(dictio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
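The `get_value` entry above is the long form of `dict.get` with a default; the one-line equivalent:

```python
def get_value(query_dict, k):
    # dict.get returns the second argument ('' here) when the key is absent.
    return query_dict.get(k, '')

print(get_value({'q': 'sunpy'}, 'q'))     # sunpy
print(get_value({'q': 'sunpy'}, 'page'))  # (empty string)
```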
Sort the contents of a directory by last modified date. | def _sorted_ls(path):
def _get_modified_time(f):
return os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=_get_modified_time)) | [
"def sorted_ls(dir):\n mtime = lambda f: os.stat(os.path.join(dir, f)).st_mtime\n return list(sorted(os.listdir(dir), key=mtime))",
"def sort_by_modified(files_or_folders: list) -> list:\n return sorted(files_or_folders, key=os.path.getmtime, reverse=True)",
"def sortFiles(paths):\n paths.sort(key=l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
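`_sorted_ls` sorts directory entries oldest-modified first via `os.stat`; a pathlib-based equivalent of the same how-to:

```python
from pathlib import Path

def sorted_ls(path):
    """Return entry names in `path`, oldest-modified first."""
    entries = Path(path).iterdir()
    return [p.name for p in sorted(entries, key=lambda p: p.stat().st_mtime)]

# The first element is the least recently modified entry, which is exactly
# what get_next_change_file (the next entry) relies on.
print(sorted_ls("."))
```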
Return a filepath that contains data about the next change to test. | def get_next_change_file():
path = '/tmp/perf/'
changes_to_test = _sorted_ls(path)
if changes_to_test:
return os.path.join(path, changes_to_test[0]) | [
"def next_checkpoint_path(self) -> Optional[Path]:\n checkpoint_file = _construct_checkpoint_path_name(\n self._latest_checkpoint_id + 1\n )\n return self.latest_checkpoint_dir.joinpath(checkpoint_file)",
"def _get_next_checkpoint_path(self) -> Optional[Path]:\n checkpoint_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the governing bodies depending on the role and the state of the Organ. It performs 3 searches to do so. | def OrgansInside(self):
portal_catalog = api.portal.get_tool(name='portal_catalog')
folder_path = '/'.join(self.context.getPhysicalPath())
values = portal_catalog.searchResults(
portal_type='genweb.organs.organgovern',
sort_on='getObjPositionInParent',
path={'... | [
"def test_retrieve_l_organizations(self):\n pass",
"def thesis_organization(self):\n\n organizations = []\n if 'v52' in self.data['article']:\n for organization in self.data['article']['v52']:\n org = {}\n if '_' in organization:\n o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert area in rad^2 to km^2. | def area_rad_to_km(area_rad):
r_earth = 6.37122e3 # SHR_CONST_REARTH, in km
circ = 2*np.pi*r_earth
foo = xr.ufuncs.sqrt(area_rad.copy())
foo *= r_earth
area_km = foo**2
return area_km | [
"def km2m(km):\n return km * 1000",
"def convert_km_to(value, unit):\n if unit == 'km':\n return value\n return value * CoordinatesModel.UNIT_RATIO.get(unit, 1.0)",
"def km2_area(polygons):\n\n reprojected_polygons = [reproject(p) for p in polygons]\n return ops.cascaded_union(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
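`area_rad_to_km` takes the square root, scales by Earth's radius, then squares again, which is algebraically a single multiplication by R²: an area element dθ·dφ in rad² spans (R·dθ)(R·dφ) in km². A plain-NumPy sketch of the same conversion (the original uses the deprecated `xr.ufuncs` namespace):

```python
import numpy as np

R_EARTH_KM = 6.37122e3  # SHR_CONST_REARTH, in km

def area_rad_to_km(area_rad):
    # sqrt -> scale -> square collapses to one multiplication by R^2.
    return np.asarray(area_rad) * R_EARTH_KM**2

one_sq_deg = np.deg2rad(1.0) ** 2  # one square degree in rad^2
print(area_rad_to_km(one_sq_deg))  # ~1.24e4 km^2
```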
Blindly sets state based on the items in statedict. | def __setstate__(self, statedict):
for k, v in list(statedict.items()):
setattr(self, k, v) | [
"def set_state(canvas, state):\n for key, value in state.items():\n set_attribute(canvas, key, value)",
"def setDatas(self, item, varDict):\n item._wState.setChecked(varDict['state'])\n item._wLabel.setText(varDict['label'])\n item._wType.setCurrentIndex(varDict['type'])\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
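`__setstate__` is the unpickling hook: pickle hands it whatever `__getstate__` returned (the instance `__dict__` by default), and the entry above restores it attribute by attribute. A round-trip sketch:

```python
import pickle

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __setstate__(self, statedict):
        # Blindly restore every saved attribute, as in the entry above.
        for k, v in statedict.items():
            setattr(self, k, v)

p = pickle.loads(pickle.dumps(Point(1, 2)))
print(p.x, p.y)  # 1 2
```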
Create a new enum class with the given names and values. | def Enum(name,names,values=None):
e = new.classobj(name,(EnumBase,),{})
e._initialize(names,values)
return e | [
"def create_enum(name, fields, values=None):\n Enum = namedtuple(name, fields)\n if values is None:\n return Enum(*fields)\n return Enum(*values)",
"def define_new(cls, name: str, members: List[UserEnumMemberContainer], _parent_scope: Optional[comp.Component]=None) -> Type['UserEnum']:\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
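The `Enum` factory above uses the Python 2 `new.classobj` API to build a class at runtime. The same technique in Python 3 is a `type()` call; a sketch that skips the original's `EnumBase._initialize` machinery (values default to 0..n-1):

```python
def make_enum(name, names, values=None):
    """Build a simple enum-like class with the given member names/values."""
    if values is None:
        values = range(len(names))
    return type(name, (), dict(zip(names, values)))

Color = make_enum("Color", ["RED", "GREEN", "BLUE"])
print(Color.GREEN)  # 1
```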
Check out a license feature from the license server ahead of time. checkoutlicense(self,feature_) | def checkoutlicense(self,feature_):
res = __library__.MSK_XX_checkoutlicense(self.__nativep,feature_)
if res != 0:
raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) | [
"def checkoutlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkoutlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")",
"def checkinlicense(self,feature_): # 3\n if not isinstance... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check in a license feature back to the license server ahead of time. checkinlicense(self,feature_) | def checkinlicense(self,feature_):
res = __library__.MSK_XX_checkinlicense(self.__nativep,feature_)
if res != 0:
raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) | [
"def checkinlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkinlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")",
"def checkoutlicense(self,feature_):\n res = __library__.MSK_XX... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
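`checkoutlicense` and `checkinlicense` follow one idiom: call the native function, then turn any nonzero status code into an exception. A generic sketch of that guard (the `describe` callback stands in for `Env.getcodedesc`):

```python
def check(res, describe=lambda r: ("MSK_RES_ERR", "code %d" % r)):
    # Nonzero native return codes become Python exceptions.
    if res != 0:
        symname, msg = describe(res)
        raise RuntimeError("%s: %s" % (symname, msg))

check(0)       # success: no-op
# check(1001)  # failure: raises RuntimeError("MSK_RES_ERR: code 1001")
```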
Preallocates a thread pool. setupthreads(self,numthreads_) | def setupthreads(self,numthreads_):
res = __library__.MSK_XX_setupthreads(self.__nativep,numthreads_)
if res != 0:
raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) | [
"def _init_pool(self):\n for i in xrange(self.thread_num):\n self.threads.append(DownloadImageThread(self))\n self.threads.append(ProgressInfoThread(self, self.time_to_sleep))",
"def _on_start(self):\n self.__thread_pool = ThreadPool(self.max_threads)\n self.__workers = thre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
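`setupthreads` preallocates the native thread pool once, up front. The analogous move in pure Python is constructing an executor before any work arrives; a sketch:

```python
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=4)  # preallocate before submitting work
print(pool.submit(pow, 2, 10).result())   # 1024
pool.shutdown()
```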
Obtains a short description of a response code. getcodedesc(code_) | def getcodedesc(code_):
symname_ = (ctypes.c_char * value.max_str_len)()
str_ = (ctypes.c_char * value.max_str_len)()
res = __library__.MSK_XX_getcodedesc(code_,symname_,str_)
if res != 0:
raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
_symname_retval = symname_.value.decode("utf-8... | [
"def get_description(self, code):\n try:\n return self.message[str(code)]\n except KeyError:\n return \"Unknown (\" + str(code) + \")\"",
"def get_error_description(self, code):\n self.c.execute(\"SELECT * FROM errorcode WHERE code=%d\" % code)\n return self.c.fet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
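`getcodedesc` preallocates two fixed-size `ctypes` character buffers, lets the C call fill them, and decodes the results. The buffer pattern in isolation, with `memmove` standing in for the native call:

```python
import ctypes

MAX_STR_LEN = 1024  # stands in for value.max_str_len

symname = ctypes.create_string_buffer(MAX_STR_LEN)  # same as (c_char * n)()
ctypes.memmove(symname, b"MSK_RES_OK", len(b"MSK_RES_OK"))  # fake C-side fill
print(symname.value.decode("utf-8"))  # MSK_RES_OK
```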
Enables debug information for the license system. putlicensedebug(self,licdebug_) | def putlicensedebug(self,licdebug_):
res = __library__.MSK_XX_putlicensedebug(self.__nativep,licdebug_)
if res != 0:
raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) | [
"def putlicensedebug(self,licdebug_): # 3\n res = self.__obj.putlicensedebug(licdebug_)\n if res != 0:\n raise Error(rescode(res),\"\")",
"def set_debug(self, debug):\n self.debug = debug",
"def _set_debug(debug):\n global _DEBUG\n _DEBUG = debug\n if debug:\n logging.dis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Input a runtime license code. putlicensecode(self,code_) | def putlicensecode(self,code_):
_code_minlength = value.license_buffer_length
if value.license_buffer_length > 0 and code_ is not None and len(code_) != value.license_buffer_length:
raise ValueError("Array argument code is not long enough: Is %d, expected %d" % (len(code_),value.license_buffer_length))
... | [
"def putlicensecode(self,code): # 3\n if code is None:\n code_ = None\n else:\n try:\n code_ = memoryview(code)\n except TypeError:\n try:\n _tmparr_code = array.array(\"i\",code)\n except TypeError:\n raise TypeError(\"Argument code ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
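`putlicensecode` validates that the caller's buffer has exactly the fixed license length before converting it for the native layer. A sketch of that guard with a hypothetical buffer length:

```python
import array

LICENSE_BUFFER_LENGTH = 21  # hypothetical; stands in for value.license_buffer_length

def put_license_code(code):
    if code is not None and len(code) != LICENSE_BUFFER_LENGTH:
        raise ValueError(
            "Array argument code is not long enough: Is %d, expected %d"
            % (len(code), LICENSE_BUFFER_LENGTH))
    # Hand the validated buffer to the native call as a C int array.
    return array.array("i", code if code is not None else [])

print(len(put_license_code(list(range(21)))))  # 21
```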