| query | document | negatives | metadata |
|---|---|---|---|
Exit the program with an error if loading data failed. | def exit_if_load_failure(cnf, msg):
if cnf is None:
exit_with_output(msg, 1) | [
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--url\", help=\"Enter a URL to begin.\", required=True)\n args = parser.parse_args()\n try:\n csvData = downloadData(args.url)\n except:\n print 'An error has occured session terminated.\\n\\\n Exiting the ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse the number of the highest index read from the RunInfo.xml | def last_index_read(directory):
read_numbers = [int(read.get("Number", 0)) for read in parsers.get_read_configuration(directory) if read.get("IsIndexedRead", "") == "Y"]
return 0 if len(read_numbers) == 0 else max(read_numbers) | [
"def findMax(self, rootidx):\n\t\tmaxNodeidex = self._downright(rootidx)\n\t\treturn maxNodeidex",
"def getLargestNodeNumber():\n arcpy.env.workspace = WORKING_GDB\n arcpy.env.overwriteOutput = True\n arcpy.Statistics_analysis(SOURCE_NETWORK_SHAPEFILE,'node_number_stats',[['A','MAX'],['B','MAX']],'')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge two Flowcell_Demux_Summary.xml files. | def merge_flowcell_demux_summary(u1, u2, fc_id):
#Read the XML to merge
fc1_f = os.path.join(u1, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),
'Flowcell_demux_summary.xml')
fc2_f = os.path.join(u2, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),
'Flowcell_demux_summary.xml')
fc1 = ... | [
"def merge_demultiplex_stats(u1, u2, fc_id):\n with open(os.path.join(u1, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),\n 'Demultiplex_Stats.htm')) as f:\n ds1 = BeautifulSoup(f.read())\n with open(os.path.join(u2, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),\n 'Demultiplex_Stats... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge two Demultiplex_Stats.htm files. Will append to the Demultiplex_Stats.htm file in u1 the Barcode Lane Statistics and Sample Information found in Demultiplex_Stats.htm file in u2. The htm file should be structured in such a way that it has two tables (in | def merge_demultiplex_stats(u1, u2, fc_id):
with open(os.path.join(u1, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),
'Demultiplex_Stats.htm')) as f:
ds1 = BeautifulSoup(f.read())
with open(os.path.join(u2, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),
'Demultiplex_Stats.htm')) as... | [
"def merge_undemultiplexed_stats_metrics(u1, u2, fc_id):\n with open(os.path.join(u1, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),\n 'Undemultiplexed_stats.metrics'), 'a+') as us1:\n with open(os.path.join(u2, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),\n 'Undemultiplexed_stat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge and sort two Undemultiplexed_stats.metrics files. | def merge_undemultiplexed_stats_metrics(u1, u2, fc_id):
with open(os.path.join(u1, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),
'Undemultiplexed_stats.metrics'), 'a+') as us1:
with open(os.path.join(u2, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),
'Undemultiplexed_stats.metrics... | [
"def merge_demultiplex_stats(u1, u2, fc_id):\n with open(os.path.join(u1, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),\n 'Demultiplex_Stats.htm')) as f:\n ds1 = BeautifulSoup(f.read())\n with open(os.path.join(u2, 'Basecall_Stats_{fc_id}'.format(fc_id=fc_id),\n 'Demultiplex_Stats... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge results of demultiplexing from different demultiplexing folders | def merge_demux_results(fc_dir):
for option in CONFIG['analysis']['bcl2fastq']['options']:
if isinstance(option, dict) and option.get('output-dir'):
_demux_folder = option.get('output-dir')
unaligned_dirs = glob.glob(os.path.join(fc_dir, '{}_*'.format(_demux_folder)))
#If it is a MiSeq r... | [
"def __mergeResultFiles():\n\t# Get path of txt resutls\n\tresults_path = NEST.GetKernelStatus()['data_path']\n\t# Create structure - the dict of a lists. Main file (string) : child files (list)\n\tfiles_map = defaultdict(list)\n\t# Build tree of rough (threaded) files\n\tfiles_list = [file for file in os.listdir(r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the data from the sam stats file e.g. D1641708_S15.bwa.drm.realn.sorted.bam.stats Returns a dictionary containing the relevant summary information. Needs sample name and the folder path containing the data. | def get_sam_stats(sample_name, stats_location):
inner_folder = stats_location.split("/")[-1][:-4] +"/" #get the inner folder name of the zip file
file_name = inner_folder + sample_name+".bwa.drm.realn.sorted.bam.stats" #Find the file.
sample_qc_dict ={}
with zipfile.ZipFile(stats_location) as myzip:
with ... | [
"def get_sam_stats(sample_name,folder):\n\n\tfile_name = sample_name+'.bwa.drm.realn.sorted.bam.stats'\n\n\tif folder[len(folder)-1] != '/':\n\n\t\tfolder = folder + '/'\n\n\tfile_path = folder+file_name\n\n\tdata_dict ={}\n\n\twith open(file_path, 'rb') as csvfile:\n\n\t\treader = csv.reader(csvfile, delimiter='\\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
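
The `get_sam_stats` document above is cut off midway through reading a member out of a zip archive. A minimal, self-contained sketch of that generic pattern (the archive and member names here are hypothetical, not from the original):

```python
import zipfile

def read_zip_member_lines(zip_path, member_name):
    """Yield decoded text lines from one member file of a zip archive."""
    with zipfile.ZipFile(zip_path) as archive:
        with archive.open(member_name) as member:
            for raw_line in member:  # iterating yields bytes lines
                yield raw_line.decode("utf-8").rstrip("\n")

# Hypothetical usage, mirroring the stats-file layout described above:
# for line in read_zip_member_lines("stats.zip", "inner/sample.bwa.drm.realn.sorted.bam.stats"):
#     print(line)
```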
This macro takes as an argument a uvm_sequence_item variable or object. The argument is created using `uvm_create` if necessary, then randomized. In the case of an item, it is randomized after the call to `UVMSequenceBase.start_item()` returns. This is called late randomization. In the case of a sequence, the subsequenc... | async def uvm_do(seq_obj, SEQ_OR_ITEM):
await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, seq_obj.m_sequencer, -1, []) | [
"async def uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, PRIORITY, *CONSTRAINTS):\n from ..seq.uvm_sequence import UVMSequence\n _seq = uvm_create_on(seq_obj, SEQ_OR_ITEM, SEQR)\n if isinstance(_seq, UVMSequence):\n if SEQ_OR_ITEM.do_not_randomize == 0:\n if SEQ_OR_ITEM.randomize_with(*C... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the same as `uvm_do` except that the constraint block in the 2nd argument is applied to the item or sequence in a randomize with statement before execution. | async def uvm_do_with(seq_obj, SEQ_OR_ITEM, *CONSTRAINTS):
await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, seq_obj.m_sequencer, -1,
*CONSTRAINTS) | [
"async def uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, PRIORITY, *CONSTRAINTS):\n from ..seq.uvm_sequence import UVMSequence\n _seq = uvm_create_on(seq_obj, SEQ_OR_ITEM, SEQR)\n if isinstance(_seq, UVMSequence):\n if SEQ_OR_ITEM.do_not_randomize == 0:\n if SEQ_OR_ITEM.randomize_with(*C... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the same as `uvm_do_pri` except that the given constraint block is applied to the item or sequence in a randomize with statement before execution. | async def uvm_do_pri_with(seq_obj, SEQ_OR_ITEM, PRIORITY, *CONSTRAINTS):
await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, seq_obj.m_sequencer,
PRIORITY, *CONSTRAINTS) | [
"async def uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, PRIORITY, *CONSTRAINTS):\n from ..seq.uvm_sequence import UVMSequence\n _seq = uvm_create_on(seq_obj, SEQ_OR_ITEM, SEQR)\n if isinstance(_seq, UVMSequence):\n if SEQ_OR_ITEM.do_not_randomize == 0:\n if SEQ_OR_ITEM.randomize_with(*C... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the same as `uvm_do_with` except that it also sets the parent sequence to the sequence in which the macro is invoked, and it sets the sequencer to the specified ~SEQR~ argument. The user must supply the constraints using lambdas. | async def uvm_do_on_with(seq_obj, SEQ_OR_ITEM, SEQR, *CONSTRAINTS):
await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, -1,
*CONSTRAINTS) | [
"async def uvm_do_with(seq_obj, SEQ_OR_ITEM, *CONSTRAINTS):\n await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, seq_obj.m_sequencer, -1,\n *CONSTRAINTS)",
"async def uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, PRIORITY, *CONSTRAINTS):\n from ..seq.uvm_sequence import UVMSequence\n _seq = uvm_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the same as `uvm_do_pri_with` except that it also sets the parent sequence to the sequence in which the function is invoked, and it sets the sequencer to the specified ~SEQR~ argument. | async def uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, PRIORITY, *CONSTRAINTS):
from ..seq.uvm_sequence import UVMSequence
_seq = uvm_create_on(seq_obj, SEQ_OR_ITEM, SEQR)
if isinstance(_seq, UVMSequence):
if SEQ_OR_ITEM.do_not_randomize == 0:
if SEQ_OR_ITEM.randomize_with(*CONSTRAINTS... | [
"async def uvm_do_on_with(seq_obj, SEQ_OR_ITEM, SEQR, *CONSTRAINTS):\n await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, -1,\n *CONSTRAINTS)",
"async def uvm_do_pri_with(seq_obj, SEQ_OR_ITEM, PRIORITY, *CONSTRAINTS):\n await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, seq_obj.m_sequencer,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the same as `uvm_send` except that the sequence item or sequence is executed with the priority specified in the argument. | async def uvm_send_pri(seq_obj, SEQ_OR_ITEM, PRIORITY):
from ..seq.uvm_sequence import UVMSequence
_seq = SEQ_OR_ITEM
if isinstance(_seq, UVMSequence):
await _seq.start(_seq.get_sequencer(), seq_obj, PRIORITY, 0)
else:
await seq_obj.start_item(SEQ_OR_ITEM, PRIORITY)
await seq_obj... | [
"async def uvm_do(seq_obj, SEQ_OR_ITEM):\n \n await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, seq_obj.m_sequencer, -1, [])",
"def setPriority(self, priority):\n\n command = 'SET PRIORITY {}'.format(priority)\n\n d = self.sendCommand(command)\n d = d.addCallback(self.resultAsInt)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Default constructor. It holds lanes_in and lanes_out so the calling object can keep track of how many times it needs to call enter, arrive, leave, or exit. Additionally, min_t_to_pass keeps track of the minimum number of timesteps required to pass the road (related to length) | def __init__(self, lanesin=1, lanesout=1, min_t_to_pass=10):
self.q_going_in = [] # Used lists instead of queue to allow peeking.
self.q_going_out = []
self.lanes_in = lanesin # Track this for external objects.
self.lanes_out = lanesout
self.min_t_to_pass = min_t_to_pass
... | [
"def __init__(self):\n rospy.init_node('wall_follow')\n self.r = rospy.Rate(5)\n self.publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n self.marker_publisher = rospy.Publisher('/wall_topic', Marker, queue_size=10)\n rospy.Subscriber('/scan', LaserScan, self.process_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A method for adding the vehicle back to the road. It is specifically called when the vehicle arrives in the road and before reaching school | def reenter_road(self, vehicle, curr_t):
self.q_going_in.append((vehicle, (curr_t - self.min_t_to_pass))) | [
"def enter_road(self, vehicle, curr_t):\n self.q_going_in.append((vehicle, curr_t))",
"def addVehicle(self, vehicleTour: VehicleTour) -> None:\n if vehicleTour in self.vehicleTours.values():\n Circulation.logger.warning(\"Replacing existing vehicle tour in circulation!\")\n self.ve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A method for adding the vehicle to the road. It is specifically called when the vehicle arrives in the road and before reaching school | def enter_road(self, vehicle, curr_t):
self.q_going_in.append((vehicle, curr_t)) | [
"def add_vehicle(vehicle_id, init_route_id):\r\n\r\n traci.vehicle.add(vehicle_id, init_route_id)",
"def add_vehicle(self, vehicle, direction):\n\n data = dict(name=vehicle.name, vehicle_obj=vehicle, direction=direction, rect=vehicle.rect)\n self.vehicles[direction].append(data)",
"def addVehic... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A method which returns the vehicle which has arrived at the garage. If no vehicle is ready to arrive (at the garage), it just returns None | def arrive_garage(self, curr_t):
# If there are no vehicles in the road, return None.
if len(self.q_going_in) == 0:
return None
        # If the time passed since entrance is less than min_t_to_pass return
# None.
if curr_t - self.q_going_in[0][1] <= self.min_t_to_pass:
... | [
"def arrive(self, v):\n time = self.triptime\n if time == 0 or not v.onroad:\n time = int(self.dist / v.get_velocity('m/s'))\n leave_time = self.time + time\n\n self.vehicle.append((v, leave_time))\n if v.mode != 'walk':\n logging.info(f'Time {self.time}: Vel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A method which returns the vehicle which has arrived at the gate. If no vehicle is ready to exit (to the gate), it just returns None | def exit_road(self, curr_t):
# If there are no vehicles in the road, return None.
if len(self.q_going_out) == 0:
return None
# If the time passed since leaving garage is less than min_t_to_pass
# return None.
elif curr_t - self.q_going_out[0][1] <= self.min_t_to_pas... | [
"def arrive_garage(self, curr_t):\n\n # If there are no vehicles in the road, return None.\n if len(self.q_going_in) == 0:\n return None\n\n # If the time passed since enterance is less than min_t_to_pass return\n # None.\n if curr_t - self.q_going_in[0][1] <= self.min_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each directory from BASE_DIR up to topdir in the directory tree, append the specified path(s) and return the resulting sequence. | def base_to_top_paths(topdir, paths, only_existing=True):
if isinstance(paths, str):
paths = (paths,)
cur_dir = os.path.abspath(BASE_DIR)
stop_dir = os.path.abspath(topdir)
iterate = True
while iterate:
for path in paths:
full_path = os.path.join(cur_dir, path)
... | [
"def walk(top, topdown = True, onerror = None):\n if top.endswith('/'):\n top = top[:-1]\n try:\n names = blue.paths.listdir(top)\n except blue.error as err:\n if onerror is not None:\n onerror(err)\n return\n\n dirs, nondirs = [], []\n for name in names:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
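
The `base_to_top_paths` document above is truncated before its loop finishes. A sketch of the walk it describes, written as a generator under the assumptions that `BASE_DIR` sits below `topdir` in the tree and that non-existing paths are skipped when `only_existing` is set:

```python
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # assumed package base

def base_to_top_paths_sketch(topdir, paths, only_existing=True):
    """Yield each of `paths` joined onto every directory from BASE_DIR up to topdir."""
    if isinstance(paths, str):
        paths = (paths,)
    cur_dir = os.path.abspath(BASE_DIR)
    stop_dir = os.path.abspath(topdir)
    while True:
        for path in paths:
            full_path = os.path.join(cur_dir, path)
            if not only_existing or os.path.exists(full_path):
                yield full_path
        if cur_dir == stop_dir:
            break
        parent = os.path.dirname(cur_dir)
        if parent == cur_dir:  # hit the filesystem root without reaching topdir
            break
        cur_dir = parent
```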
Find a file in a top to bottom search through the repository hierarchy. For all repositories/directories from `topdir` down to the core repository in BASE_DIR, check whether the relative `path` exists and, if yes, return its absolute path. `path` can be any file system object, including links and directories. If `path`... | def find_path(path, topdir):
paths = list(base_to_top_paths(topdir, path))
if paths:
return paths[-1]
else:
raise IOError("Unable to find the relative path '{}' in the repository hierarchy".format(path)) | [
"def get_topdir():\n path = Path(os.path.dirname(__file__))\n while True:\n if (path / \".top\").exists():\n return path\n if path.parent == path:\n # Seems like we reached the home /\n raise ValueError(\"Couldn't determine root directory.\")\n path = path... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reverse the operation performed by `base_path`. For all `x`, `un_base_path(base_path(x)) == x`. | def un_base_path(path):
if BASE_DIR == '':
return path
return path[len(BASE_DIR) + 1:] | [
"def reversed(self):\n path = self.path.reversed()\n t0 = path.len() - self.t0\n t1 = path.len() - self.t1\n return TrattoPath(path, t1, t0)",
"def remove_base(base, iri):\n # TODO: better sync with jsonld.js version\n # skip IRI processing\n if base is None:\n return i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a single path or a list of paths so that they are safe to pass as command line parameters to git. This is necessary to account for differences in how git binaries handle paths across platforms. In particular, when combining a native Python interpreter with a cygwin git binary on Windows, all paths passed to git... | def convert_paths(self, paths):
assert isinstance(paths, (str, list))
def convert(path):
if os.path.isabs(path):
path = os.path.relpath(path, self.local_repository)
return path.replace(os.sep, self.sep)
if isinstance(paths, str):
return conve... | [
"def localize_path_args(args: List[str], path_flags: Collection[str], input_dir: str) -> List[str]:\n if len(args) == 0:\n return args\n if args[0] in path_flags:\n return [args[0], input_dir] + localize_path_args(args[2:], path_flags, input_dir)\n return args[0:1] + localize_path_args(args[1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a set of remote branches excluding the remote prefix. | def _get_remote_branches(self):
return frozenset([ref.split('/', maxsplit=1)[1] for ref in self._get_remote_refs()]) | [
"def get_branches_on_remote(self, remote):\n output = self.run_git_cmd(['branch', '--remote', '--no-color'])\n return self._get_branches_from_branch_remote_output(output, remote)",
"def _get_remote_refs(self):\n return frozenset([line[2:].strip() for line in self._do(['branch', '-r'], as_line... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a set of remote refs including the remote prefix (typically 'origin/'). | def _get_remote_refs(self):
return frozenset([line[2:].strip() for line in self._do(['branch', '-r'], as_lines=True)]) | [
"def _get_remote_branches(self):\n return frozenset([ref.split('/', maxsplit=1)[1] for ref in self._get_remote_refs()])",
"def remote_refs(remote, heads=False, tags=False):\n args = ['git', 'ls-remote', remote]\n if heads:\n args.insert(2, '--heads')\n if tags:\n args.insert(2, '--ta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the currently active branch in the local git repository and return its name as a string. | def get_active_branch(self):
pattern = '* '
for line in self._do(['branch'], as_lines=True):
if line.startswith(pattern):
return line.split(' ', maxsplit=1)[1].strip()
raise LookupError('No active branch in git repository ' + self.local_repository) | [
"def get_active_branch():\n return git.Repo().active_branch.name",
"def get_active_branch(self):\n \n try:\n ab = self.repo.active_branch.name\n except TypeError:\n ab = \"(no branch)\"\n \n return ab",
"def get_current_branch_name() -> str:\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
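
The git-wrapper rows above and below all delegate to a `_do` helper that never appears in this dump. A minimal sketch of what such a helper plausibly looks like, assuming it shells out to the `git` binary inside `self.local_repository` (the class name here is a stand-in):

```python
import subprocess

class GitRepo:
    """Minimal stand-in for the wrapper class these methods belong to."""

    def __init__(self, local_repository):
        self.local_repository = local_repository

    def _do(self, args, as_lines=False):
        """Run `git <args>` in the local repository and return its output."""
        result = subprocess.run(
            ['git'] + args,
            cwd=self.local_repository,
            capture_output=True,
            text=True,
            check=True,  # raise on non-zero exit status
        )
        return result.stdout.splitlines() if as_lines else result.stdout
```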
Create a new branch, optionally from a specific start point. If track is set to True, then '--track' will be passed to git. If track is set to False, then '--no-track' will be passed to git. If track is None, then no tracking flag will be passed to git. | def branch(self, name, start_point=None, *, track=None):
params = ['branch']
if track is not None:
params.append('--track' if track else '--no-track')
params.append(name)
if start_point is not None:
params.append(start_point)
return self._do(params) | [
"def gen_new_track(track, index):\n detection = track.meta[DETKEY][index]\n return Track(id=track.id, ids=[detection.id], timestamps=[detection.timestamp],\n meta={DETKEY: [detection]})",
"def create(self, name, base=None, fetch=False):\n # :todo:rethink: feature branch is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the upstream / tracking branch of a given branch. If branch is None, it defaults to the current branch. | def set_upstream(self, upstream, branch=None):
params = ['branch', '-u', upstream]
if branch:
params.append(branch)
return self._do(params) | [
"def setBranch(self, branch):\n if branch is None:\n self.pop(Header.PARAM_BRANCH)\n else:\n self[Header.PARAM_BRANCH] = branch",
"def set_branch(component=\"neutron\", branch=\"master\"):\n ip = get_lab_vm_ip()\n with settings(host_string=ip, abort_on_prompts=True, warn_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge the specified revision ID into the currently active branch. | def merge_into_active_branch(self, revid, *options):
assert isinstance(revid, str)
return self._do(['merge'] + list(options) + [revid]) | [
"def merge(app,\n revisions: \"one or more revisions, or 'heads' for all heads\",\n *,\n message: \"message string to use with 'revision'\" = None,\n branch_label: 'specify a branch apply to the new revision' = None,\n rev_id: (\n 'specify a hardcoded revisi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch new revisions from the specified remote. | def fetch(self, remote='--all', prune=False):
if prune:
prune_option = ['--prune']
else:
prune_option = []
return self._do(['fetch'] + prune_option + [remote]) | [
"def pull(self, remote = 'origin'):",
"def fetch_remote(self):\n # TODO(dhermes): This assumes remote_info is not None. Fix this.\n remote_info = self.__rietveld_info.remote_info\n print utils.capture_command('git', 'fetch', remote_info.remote,\n single_line... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Push the local revision 'src' to a remote. If 'src' has a remote tracking branch, that is where 'src' is pushed to. | def push(self, src=None, force=False, set_upstream=False):
assert src is None or isinstance(src, str)
assert isinstance(force, bool)
if src is None:
src = self.get_active_branch()
if force:
force_option = ['--force']
else:
force_option = []
... | [
"def push():\n#\tupdate_local_repo()\n#\ttest()\n\tpush_repo_changes()\n\tupdate_remote()",
"def _push(self, src, dst):\n force = False\n if src.startswith('+'):\n src = src[1:]\n force = True\n present = [self._refs[name][1] for name in self._refs]\n present.exte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rename a local resource from its old name 'src' to its new name 'dst' or move a list of local files 'src' into a directory 'dst'. | def move(self, src, dst):
assert isinstance(src, (str, list))
assert isinstance(dst, str)
if isinstance(src, str):
src_list = [src]
else:
src_list = src
return self._do(['mv'] + self.convert_paths(src_list) + [self.convert_paths(dst)]) | [
"def Rename(src, dst):\n os.rename(src, dst)",
"def rename(src, dst):\n if sys.platform == \"win32\":\n return win32_rename(src, dst)\n else:\n return os.rename(src, dst)",
"def move(src, dst):\r\n\r\n try:\r\n os.rename(src, dst)\r\n except OSError:\r\n if os.path.isdir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Commit the changes in the specified 'files' with the given 'message' to the currently active branch. If 'files' is None (or unspecified), all staged files are committed. | def commit(self, msg, files=None):
assert isinstance(msg, str)
assert files is None or isinstance(files, list)
if files is None:
file_args = []
else:
file_args = self.convert_paths(files)
return self._do(['commit', '-m', msg] + file_args) | [
"def commit(repo, files, message):\n if not message:\n marker = '# Files to be committed:'\n hint = ['', '', marker, '#']\n for file in files:\n hint.append('# U %s' % file)\n message = click.edit('\\n'.join(hint))\n if message is None:\n click.echo('Abo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rename a local branch from its current name 'src' to the new name 'dst'. | def rename_branch(self, src, dst):
assert isinstance(src, str)
assert isinstance(dst, str)
return self._do(['branch', '-m', src, dst]) | [
"def Rename(src, dst):\n os.rename(src, dst)",
"def rename(src, dst):\n if sys.platform == \"win32\":\n return win32_rename(src, dst)\n else:\n return os.rename(src, dst)",
"def rename(self, path, dst, opt=None):\n\n url = self._paths_url(path, 'rename')\n self._post(url, opt,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return information from the latest commit with a specified `pretty` format. The branch to take the log from may also be given. See the `git log` man page for possible pretty formats. | def _log_pretty(self, pretty_fmt, branch=None):
# Future directions: Rather than just the latest commit, allow the caller
# specify the number of commits. This requires additional parsing of the
# result to return a list, rather than just a single item.
# Additionally, the caller could p... | [
"def get_latest_commit_time(branch):\n cmd_str = GIT + \" show -s --format=\\\"%ct\\\" \" + branch\n return run_cmd(cmd_str)",
"def print_commit(commit, decode, outstream=sys.stdout):\n outstream.write(\"-\" * 50 + \"\\n\")\n outstream.write(\"commit: \" + commit.id.decode(\"ascii\") + \"\\n\")\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the hash of the latest commit on a given branch. The branch may be omitted, in which case it defaults to the current head. | def branch_hash(self, branch=None):
return self._log_pretty('%H', branch=branch) | [
"def get_latest_commit_time(branch):\n cmd_str = GIT + \" show -s --format=\\\"%ct\\\" \" + branch\n return run_cmd(cmd_str)",
"def latest_hash() -> str:\n ret = subprocess.run([\"git\", \"rev-parse\", \"HEAD\"], capture_output=True, check=True)\n assert ret.returncode == 0, \"Failed to get latest com... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the working directory is clean. | def working_dir_clean(self):
return self._do(['status', '--porcelain']) == '' | [
"def clean():\n title(\"Cleaning build dir...\")\n Mambo(CWD).clean_build_dir()\n done()",
"def _delete_working_dir(self):\n print(\"delete\")\n if os.path.exists(self.path):\n shutil.rmtree(self.path)",
"def clean(self, clean_todo=True, clean_done=True, clean_fail=True):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve all remotes known in the local repository as a set of Remote objects. | def get_remotes(self):
remotes = set()
for line in self._do(['remote', '-v'], as_lines=True):
parts = line.split('\t')
remotes.add(Remote(parts[0], parts[1]))
return remotes | [
"def get_all_remotes():\n if not in_repo:\n return None\n\n config = configparser.ConfigParser()\n config.read(find_repo_toplevel(\".\") / \".git/config\")\n\n remotes = {\n x.removeprefix('remote \"').removesuffix('\"'): {\n \"url\": config[x][\"url\"],\n \"name\": x... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts an arbitrary valid path to a normalized, absolute path free of symbolic link indirections. | def _sanitize_path(path):
return os.path.normpath(follow_link(os.path.abspath(path))) | [
"def resolve_symlink(path):\n if not is_windows():\n # Only does this dance on Windows.\n return path\n parts = os.path.normpath(path).split(os.path.sep)\n for i in range(2, len(parts)):\n partial = os.path.sep.join(parts[:i])\n if os.path.isfile(partial):\n with open(partial) as f:\n lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Modify a file by matching and replacing individual lines. `path` is a string identifying the path of the file to modify. `line_filters` is an iterable container of LineFilter objects. For each line in the file, each filter is applied in order. `only_if_all_matched` is a boolean controlling when the file is rewritten. W... | def update_file(path, line_filters, only_if_all_matched=False):
sio_obj = io.StringIO()
updated = {flt: False for flt in line_filters}
newlines = None
with open(path, encoding='utf8') as file_obj:
for line_no, line in enumerate(file_obj, 1):
if newlines is None:
newli... | [
"def apply_filter(full_path, filter_rexs):\n for rex in filter_rexs:\n if rex.match(full_path):\n return True\n return False",
"def iter_all_lines__(file_path: str, use_tqdm: bool = False, tqdm_msg: str = None, lstrip=False, rstrip=True,\r\n line_filter:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call this method to register your plugins to your matcher, either from your additional matcher methods (``with_something`` or ``and_something``) or from the ``__init__`` method. NOTE that you must return ``self`` from those additional matcher methods. | def register(self, plugin):
self._matchers[plugin] = False
return self | [
"def _register_plugins(cls, project: module.Project) -> None:\n currently_registered_plugins: typing.Mapping[\n str, typing.Type[plugins.Plugin]\n ] = plugins.PluginMeta.get_functions()\n loaded_mod_ns_pattern: typing.Pattern[str] = re.compile(\n \"(\" + \"|\".join(re.esca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register a plugin for a kwarg argument | def register_for_kwarg(self, plugin, kwarg=None):
if kwarg is not None:
self.register(plugin) | [
"def register_plugin(plugin):\n plugins.append(plugin)",
"def register_plugin(plugin):\n if plugin.plugin_name not in PLUGINS:\n PLUGINS[plugin.plugin_name] = plugin",
"def register(func):\r\n package, _, plugin = func.__module__.rpartition('.')\r\n ## where the function is defined.\r\n pk... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create default AsanaAPI instance. | def _create_asana_api(api_key, debug=False):
return asana.Client.access_token(api_key) | [
"def create_api():\n auth = tweepy.OAuthHandler(CredentialsConfig.TWITTER_CONSUMER_KEY, CredentialsConfig.TWITTER_CONSUMER_SECRET)\n auth.set_access_token(CredentialsConfig.TWITTER_ACCESS_TOKEN, CredentialsConfig.TWITTER_TOKEN_SECRET)\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_not... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the asana task plugin. | def initialize(self):
try:
api_key = self._pomodoro_service.get_config("task.asana", "api_key")
self.asana_api = self._get_asana_api(api_key)
except Exception as ex:
logger.error("Error initializing plugin: {0}".format(ex)) | [
"def __init__(self):\n super(TNL3ServicePlugin, self).__init__()\n self._tn_info = None\n # self._driver = None\n self.task_manager = tasks.TaskManager()\n self.task_manager.start()\n self.tn_init()",
"def initialize_plugin(self) -> None:\n pass",
"def init(task,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of voice labels belonging to the provided list of choice_options. | def choice_options_resolve_voice_labels(choice_options, language):
choice_options_voice_labels = []
for choice_option in choice_options:
choice_options_voice_labels.append(choice_option.get_voice_fragment_url(language))
return choice_options_voice_labels | [
"def get_choice_label(choices, string):\n labels = []\n\n if string and isinstance(string, six.string_types) and choices:\n label = find_choice_label(choices, string)\n\n if label:\n labels.append(label)\n else:\n # Try to get labels by splitting the string\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dict that can be used to generate the choice VXML template choice = this Choice element object choice_voice_label = the resolved Voice Label URL for this Choice element choice_options = iterable of ChoiceOption object belonging to this Choice element choice_options_voice_labels = list of resolved Voice Label ... | def choice_generate_context(choice_element, session):
choice_options = choice_element.choice_options.all()
language = session.language
context = {'choice':choice_element,
'choice_voice_label': choice_element.get_voice_fragment_url(language),
'choice_options': choice_options,... | [
"def setup_response(self):\n # call secondary setup for MultipleChoice questions, to set name\n # attributes\n self.mc_setup_response()\n\n # define correct choices (after calling secondary setup)\n xml = self.xml\n cxml = xml.xpath('//*[@id=$id]//choice', id=xml.get('id'))... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Protected. Indicate that the object has changed. | def setChanged(self):
self._hasChanged = True | [
"def changed(self):\n return self._changed",
"def notify_changed(self):\n self._notify_trait('array', self.array, self.array)",
"def setModified(self):\n self.modified = True",
"def has_changed (self):\n if (self.collection != None) and self.collection.item_changed:\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Protected. Notify observers only if the object has been modified. Doesn't notify if a batch change is going on. | def notifyObserversIfChanged(self, data = None ):
if not self.isDoingBatchChanges() and self.hasChanged():
for observer in self._observers:
observer.update( data )
self.clearChanged() | [
"def _notifyIfChanged_(self, other):\r\n if (self._manager is not None) and self._manager._changeListeners and (other != self):\r\n for changeListener in self._manager._changeListeners:\r\n changeListener(False, self)",
"def has_changed (self):\n if (self.collection != None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if this object is currently going through a batch of changes. | def isDoingBatchChanges(self):
return self._batchChangeDepth > 0 | [
"def hasChanges(self):\n return self.changes",
"def has_changes(self):\n return self._repo.is_dirty()",
"def bones_changed(self) -> bool:\n\n for bone in self.bones:\n bone_regex = re.compile(bone)\n for changed in self.changed_set:\n if bone_regex.searc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Indicates that we want to begin performing batch changes on this object. Every call to beginBatchChanges() should be matched by a call to endBatchChanges(), observers will only be notified when the first begin is closed by the last end. The call to endBatchChanges() should usually be in a finally clause. | def beginBatchChanges(self):
self._batchChangeDepth += 1 | [
"def onBatchStarted(self, batchStarted):\n pass",
"def begin(self):\n\t\tif self._status != self._INITIAL:\n\t\t\traise ValueError(\"Batch already started previously.\")\n\t\tself._status = self._IN_PROGRESS\n\n\t\tfrom viur.xeno.databases import dbinterface\n\t\tdbinterface.transaction_start()\n\t\tdbinte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Indicates that we want to end performing batch changes on this object. Every call to beginBatchChanges() should be matched by a call to endBatchChanges(), observers will only be notified when the first begin is closed by the last end. The call to endBatchChanges() should usually be in a finally clause. | def endBatchChanges(self):
self._batchChangeDepth -= 1
self.notifyObserversIfChanged() | [
"def end_batch(self) -> None:\n self.handle(events.EndBatch())",
"def onBatchCompleted(self, batchCompleted):\n pass",
"def _run_callbacks_on_batch_end(batch, logs):\n mlp_log.mlperf_print(\n 'block_stop', None, metadata={\n 'first_epoch_num': int(batch),\n })\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
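
The begin/end batch docstrings above insist on matched calls, with the end in a finally clause. A small helper, not part of the original class, that enforces that pairing with a context manager:

```python
from contextlib import contextmanager

@contextmanager
def batch_changes(observable):
    """Pair beginBatchChanges/endBatchChanges around a block of edits."""
    observable.beginBatchChanges()
    try:
        yield observable
    finally:
        observable.endBatchChanges()  # runs even if the block raises

# Usage sketch: observers get at most one update(), after the whole batch.
# with batch_changes(model):
#     model.setChanged()
#     model.setChanged()
```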
Removes the specified observer from the list of observers. | def deleteObserver(self, observer):
self._observers.remove(observer) | [
"def unregister(self, observer):\n self.observers.remove(observer)",
"def detach(self, observer):\r\n self.observerList.remove(observer)",
"def unregister(self, observers):\n\n if isinstance(observers, list) or isinstance(observers, tuple):\n for observer in observers:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of observers of this object. | def countObservers(self):
return len( self._observers ) | [
"def subscriber_count(self):\n return len(self._subscribers)",
"def obj_count(self):\n with self._lock:\n return self._obj_count",
"def tracklet_count(self):\n return len(self.tracklets)",
"def view_count(self):\n return len(self.signal_views)",
"def n_events(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes an open file object and an optional empty list and returns the location inside the file where the cert resides, as well as the cert size as [size], so use CertSize[0] to get the value. | def LocateCert(myfile, CertSize=None): # set CertSize=[] to receive back the size of the Cert
wPE32basedValue = 0 # if wPE32 == 0x10b (267) , then value is 128 otherwise 144
dw = 0
dwSize = 0
wPE32 = None
myfile.seek(0x3c,0)
dw = myfile.read(4)
# print "dw = %s" % unpack('<i',dw)
myfile.seek(unpack('<i',dw)[0] ... | [
"def system_cert_file():\n\tfor f in authority_certificate_files:\n\t\tif os.path.exists( f):\n\t\t\treturn f\n\treturn None",
"def get_cert(filepath=None):\n filepath = filepath or TLS_FILEPATH\n with open(filepath, 'rb') as f:\n cert = f.read()\n return cert",
"def CertInfo(fname: str) -> RET:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if batch returned from SemiSupervisedIterator is labeled. | def is_labeled(batch: Union[Dict, Sequence]) -> bool:
if isinstance(batch, dict):
return batch[SemiSupervisedIterator.IS_LABELED_TAG]
elif isinstance(batch, tuple):
item, idx = batch
return idx == 0
else:
raise ValueError(f"Unknown type: {type(batc... | [
"def __is_labeled(self, chunk):\n\n if chunk[0][0:3] == '!*!':\n return True\n return False",
"def same_label(self):\n return len(self.labels_list)<=1",
"def check_label(gold: ScanResult, result: ScanResult) -> bool:\n if len(gold.labels) != len(result.labels):\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize weights and prune heads if needed. | def init_weights(self):
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights() | [
"def init_weights(self):\n # Prune heads if needed\n if self.config.pruned_heads:\n self.prune_heads(self.config.pruned_heads)\n\n self.apply(self._init_weights)\n\n # Tie weights should be skipped when not initializing all weights\n # since from_pretrained(...) calls t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check if shapes of 2 arrays match | def _checkShape(x: np.ndarray, y: np.ndarray):
assert x.shape == y.shape, (f"Dimensions of image 1 {x.shape} "
f"do not match image 2 {y.shape}") | [
"def check_same_dim(shape_x, shape_y):\n shape_x_len = len(shape_x)\n for k in range(shape_x_len):\n if shape_x[k] != shape_y[k]:\n return False\n\n return True",
"def shape_matches(s_legal, s_actual):\n # Array must have required number of dimensions\n if len(s_legal) != ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use the pearsonr function from scipy.stats | def pearson(x, y):
_checkShape(x, y)
return stats.pearsonr(x.flat, y.flat)[0] | [
"def pearsonr(df):\n r, p = nwise_apply(df, metrics.pearsonr, n=2, comm=True)\n return _dict_to_namedtuple(r, 'Pearsons_r'), _dict_to_namedtuple(p, 'p_value')",
"def pearson_correlation(X, Y):\n # get the covariance cov(X, Y)\n covariance = np.cov(X, Y, bias=True)[0][1]\n r = covariance / float(np.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
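
A quick usage sketch for the `pearson` helper above; the arrays here are random stand-ins for the images it is meant to compare:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.random((8, 8))
y = x + 0.1 * rng.random((8, 8))  # noisy copy of x

# Same call the helper makes internally: flatten both images, keep r only.
r = stats.pearsonr(x.flat, y.flat)[0]
print(f"Pearson r = {r:.3f}")  # close to 1, since y closely tracks x
```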
Raise an error if the metric being called has not been implemented | def _checkMetricImplemented(self,
metric: str,
) -> None:
if metric not in self.metric_list:
raise NotImplementedError(
f"Method {metric} not recognised.\n"
f"Possible methods are:\n" +
"\... | [
"def get_metric_func(self):",
"def test_get_metrics(self):\n pass",
"def test_metric_default_return():\n\n from foreshadow.metrics import MetricWrapper\n\n def test(X):\n raise Exception\n\n metric_wrapper = MetricWrapper(test, 0)\n assert 0 == metric_wrapper.calculate([1, 2, 3])",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
take a series of numerical or categorical values and map to a number of colours, creating a row or column colour series recognised by sns.clustermap | def _makeRowColours(self,
annotation_series: pd.Series,
palette: str = "Set1", # palette for categorical data
cmap=cm.Blues, # colormap for numeric data
verbose=True,
) -> Tuple[pd.Series,
... | [
"def custom_cmap(n):\n # first color is grey from Set1, rest other sensible categorical colourmap\n cmap_array = sns.color_palette(\"Set1\", 9)[-1:] + sns.husl_palette(n - 1, h=.6, s=0.7)\n cmap = colors.LinearSegmentedColormap.from_list('mmdgm_cmap', cmap_array)\n return cmap, cmap_array",
"def Color... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A transaction to claim a job. The transaction is rolled back if the status is not 'waiting'. | def claimJob(self, job_key):
job = Job.get_by_id(job_key)
if job.status != 'waiting':
raise db.Rollback()
job.status = 'started'
# pylint: disable-msg=E1103
if job.put():
return job
else:
return None | [
"def job():\n\n job = Mock()\n job.state = pypachy.JOB_SUCCESS\n\n return job",
"def test_enqueue_job(self):\n job_id = MockJobManagerOne.create_new()\n MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)\n self.assertEqual(\n MockJobManagerOne.get_sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Timeout a job. If a job has timed out more than 50 times, the job is aborted. | def timeoutJob(self, job):
job.timeouts += 1
if job.timeouts > 50:
job.status = 'aborted'
else:
job.status = 'waiting'
job.put()
job_id = job.key().id()
logging.debug("job %d now timeout %d time(s)" % (job_id, job.timeouts)) | [
"def etimeout():\n return pexc.JobRequestTimedOut(operation_name='foo', seconds=1800)",
"def test_job_limit_timeout(self):\n job_limit = 5\n self.fake_api_backend._api_client = JobTimeoutClient(\n job_limit=job_limit, max_fail_count=1)\n self.fake_api_provider._api_client = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fail a job. If the job has failed more than 5 times, the job is aborted. | def failJob(self, job):
job.errors += 1
if job.errors > 5:
job.status = 'aborted'
else:
job.status = 'waiting'
job.put()
job_id = job.key().id()
logging.warning("job %d now failed %d time(s)" % (job_id, job.errors)) | [
"def fail_job(self, jobid, reason):",
"def fail_monitored_job(job, exit_code, diagnostics, queues, traces):\n\n set_pilot_state(job=job, state=\"failed\")\n job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(exit_code, msg=diagnostics)\n job.piloterrordiag = diagnostics\n traces.pilot['e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Trivial iterator that iterates over jobs then retry_jobs | def iterate(self, jobs, retry_jobs):
for job in jobs:
yield job
while retry_jobs:
yield retry_jobs[0] | [
"def __iter__(self):\n return iter(self.jobs())",
"def __iter_alljobs__(self):\n pass",
"def retryingIter(queryGenerator):\n lastCursor = None\n for i in range(100):\n query = queryGenerator()\n if lastCursor:\n query.with_cursor(lastCursor)\n try:\n for item in query:\n la... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Give the number of ways to take n steps, given that at each step, you can choose to take 1, 2, or 3 steps. >>> stairs(3) 4 >>> stairs(5) 13 >>> stairs(10) 274 | def stairs(n):
if n <= 2:
return n
if n == 3:
return 4
return stairs(n-1) + stairs(n-2) + stairs(n-3) | [
"def stairs(n):\n ### Your code here ###\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return stairs(n-1) + stairs(n-2)",
"def climbStairs(self, n: int) -> int:\n return 1 if n == 1 else 2 if n == 2 else self.dynamicProgramm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
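
The recursive `stairs` above recomputes the same subproblems exponentially often. A memoized sketch that returns the same values in linear time:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def stairs_memo(n):
    # Same base cases and recurrence as stairs() above, cached.
    if n <= 2:
        return n
    if n == 3:
        return 4
    return stairs_memo(n - 1) + stairs_memo(n - 2) + stairs_memo(n - 3)

assert stairs_memo(10) == 274  # matches the doctest above
```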
Give the number of ways to take n steps, given that at each step, you can choose to take 1, 2, ..., k-2, k-1, or k steps. >>> kstairs(5, 2) 8 >>> kstairs(5, 5) 16 >>> kstairs(10, 5) 464 | def kstairs(n, k):
if n == 0:
return 0
if n <= k:
return 2**(n-1)
return sum([kstairs(n - i, k) for i in range(1, k + 1)]) | [
"def stairs(n):\n if n <= 2:\n return n\n if n == 3:\n return 4\n return stairs(n-1) + stairs(n-2) + stairs(n-3)",
"def stairs(n):\n ### Your code here ###\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iteratively finds all possible subsets of a list (including the trivial and null subsets) >>> all_subsets([1,2,3]) [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]] | def all_subsets(lst):
results = [[]]
while lst:
results += [q + [lst[0]] for q in results]
lst.pop(0)
return sorted(results,key=len) # To appear in a visually appealing order | [
"def all_subsets(lst):\n subsets = [[]]\n # Loop through all items in list and add item to all subsets in subsets list\n for item in lst:\n subsets += [subset + [item] for subset in subsets]\n return subsets",
"def subsets(self):\n result = []\n visited = set()\n for x in s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
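
`all_subsets` above destroys its input (it pops from `lst`). A non-destructive alternative built from itertools that yields the same length-sorted order:

```python
from itertools import chain, combinations

def all_subsets_iter(lst):
    """All subsets of lst, shortest first, without mutating the input."""
    return [list(c) for c in chain.from_iterable(
        combinations(lst, r) for r in range(len(lst) + 1))]

assert all_subsets_iter([1, 2, 3]) == [
    [], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
```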
Given a tuple of numbers, where each number represents the size of a slice of pie, distribute the slices among 2 people as evenly as possible. (i.e., minimizing the difference between the sums of two sets of values) >>> min_pie((1, 1, 1, 1)) [((1, 1), (1, 1))] >>> min_pie((1, 1, 1, 1, 2, 3)) [((2, 1, 1), (3, 1, 1)), ((... | def min_pie(pie):
def partition(s):
if len(s) == 2:
return [((s[0],), (s[1],))]
ps = partition(s[1:])
return [(p1 + (s[0],), p2) for p1, p2 in ps] + \
[(p1, p2 + (s[0],)) for p1, p2 in ps]
data = {}
for p1, p2 in partition(pie):
data.setdefault(abs... | [
"def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n leastTrip = len(cows)\n for partition in get_partitions(cows):\n# print(partition)\n if len(partition) <= leastTrip:\n point = 0\n for eachtrip in partition:\n tripWeight = 0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
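
The `min_pie` document is truncated right after it starts grouping partitions by their sum difference. A compact sketch finishing that idea; the exact return format of the original is an assumption (here: the de-duplicated partitions achieving the minimal difference):

```python
def min_pie_sketch(pie):
    def partition(s):
        # Every 2-way split of s; the last two slices are fixed on opposite sides.
        if len(s) == 2:
            return [((s[0],), (s[1],))]
        ps = partition(s[1:])
        return ([(p1 + (s[0],), p2) for p1, p2 in ps] +
                [(p1, p2 + (s[0],)) for p1, p2 in ps])

    # Group every split by how uneven it is, then keep the most even group.
    data = {}
    for p1, p2 in partition(pie):
        data.setdefault(abs(sum(p1) - sum(p2)), []).append((p1, p2))
    return sorted(set(data[min(data)]))  # dedupe; ordering is an assumption

assert min_pie_sketch((1, 1, 1, 1)) == [((1, 1), (1, 1))]  # first doctest above
```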
Transforms the points that were given in meters to points in pixels so that the calibrated image is the biggest possible without losing information. | def meter_to_pixel(src_points, dst_meter, image):
# Get the image size
(height, width) = image.shape[: 2]
# --- Get the top down view of the entire pool --- #
# Get the coordinates in pixel of dst_meter in the entire pull
dst_pixel_full_pool = np.zeros((4, 2))
# We take one meter from each side... | [
"def meters2pixels(self, value: float) -> float:\n if self.refscale == 0.0:\n return 0.0\n return SCALE_FACTOR * (value / self.refscale)",
"def point_cloud_to_panorama(points,\n v_res=0.42,\n h_res = 0.35,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calibrates the video from the starting time to the end time. Creates the video and a txt file. | def calibrate_video(path_video, time_begin=0, time_end=-1, destination_video=None, destination_txt=None):
if destination_video is None:
destination_video = Path("../data/4_model_output/videos/tries")
if destination_txt is None:
destination_txt = Path("../data/2_intermediate_top_down_lanes/calibr... | [
"def generate_10000_video(input_file='resources/10000.mid', output_file='10000.mp4',\n start_delay=3, end_delay=10, fps=60):\n midi = MidiParser(input_file)\n notes = midi.extract_notes()\n note_range = midi.get_note_range(notes)\n velocity_range = midi.get_velocity_range(notes)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test date pattern timed task | def test_date_pattern():
contact = FakeContact()
test_msg = 'this is a test message'
date = datetime.now() + timedelta(seconds=1)
async def task():
controller = TaskController()
command = KEY_TIMED_TASK + KEY_SPLIT + f'{str(date)}-{test_msg}'
assert controller.handle_msg(command... | [
"def test_nextdate_c2(self):",
"def test_uda_date_task(self):\n\n code, out, err = self.t(\"add with extra:tomorrow\")\n self.assertIn(\"Created task\", out)\n\n code, out, err = self.t(\"add without\")\n self.assertIn(\"Created task\", out)\n\n code, out, err = self.t(\"uda\")\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test cron pattern timed task | def test_cron_pattern():
async def task():
controller = TaskController()
command = KEY_TIMED_TASK + KEY_SPLIT\
+ '7-3-*-22-13-test message'
contact = FakeContact()
assert controller.handle_msg(command, contact, True)
asyncio.run(task()) | [
"def test_eval_schedule_cron(schedule):\n schedule.opts.update({\"pillar\": {\"schedule\": {}}})\n schedule.opts.update(\n {\"schedule\": {\"testjob\": {\"function\": \"test.true\", \"cron\": \"* * * * *\"}}}\n )\n now = datetime.datetime.now()\n schedule.eval()\n assert schedule.opts[\"sch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Structuralize a list of fund infos to an Excel document. | def write_to_xlsx(fund_infos: List[FundInfo], xlsx_filename: Path) -> None:
# TODO profile to see whether and how much setting constant_memory improves
# performance.
with xlsxwriter.Workbook(xlsx_filename, {"constant_memory": True}) as workbook:
        logger.log("Creating a new Excel document......")
worksheet =... | [
"def extract_data_to_excel(bases_arr):\r\n workbook = xlsxwriter.Workbook(FILENAME)\r\n worksheet = workbook.add_worksheet()\r\n for x in range(len(bases_arr)):\r\n worksheet.write(0, x, PEOPLE[x]) # the names of the people that chose the bases\r\n bases = [char for char in bases_arr[x]]\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the net value date is latest. Take advantage of the knowledge that fund info stays the same | def net_value_date_is_latest(net_value_date: date) -> bool:
now_time = datetime.now().time()
today = date.today()
yesterday = today - timedelta(days=1)
if time.min <= now_time < time(20):
return net_value_date == yesterday
else:
return net_value_date == today | [
"def is_latest():",
"def checkLatestTrend(stockcode,price):\r\n\tprint DealDatum[stockcode]",
"def _get_latest_datafile_date(self):\n\n latest_date = None\n\n for data in self.get_computationdata():\n\n datafile_date = data.datafile.get_local_last_modified()\n\n if latest_date and data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the estimate datetime is latest. Take advantage of the knowledge that estimate info stays the same | def estimate_datetime_is_latest(estimate_datetime: datetime) -> bool:
open_market_time = time(9, 30)
close_market_time = time(15)
now_time = datetime.now().time()
today = date.today()
yesterday = today - timedelta(days=1)
today_close_market_datetime = datetime.combine(today, close_market_time)
... | [
"def is_latest():",
"def test_received_at_is_newest_date(base_store: Store, helpers):\n\n # GIVEN a database with a case and two samples with different received dates\n new_case = add_case(helpers, base_store)\n yesterday = datetime.now() - timedelta(days=1)\n yesteryear = datetime.now() - timedelta(d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a string represents a valid fund code | def validate_fund_code(s: str) -> bool:
return bool(re.fullmatch(r"[0-9]{6}", s)) | [
"def valid_unit_code(unit_code):\n # matches 'ABC 1234'\n regex = r\"[a-zA-Z]{3} ?\\d{4}\"\n return re.match(regex, unit_code)",
"def validate(zip_code) -> bool:\n\n zip_code = str(zip_code).zfill(5)\n if len(zip_code) != 5: return False\n if not zip_code.isdigit(): return False\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
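
`re.fullmatch` (unlike `re.match`) anchors at both ends of the string, which is exactly what a fixed-width code check needs. A quick self-contained demo of the helper above:

```python
import re

def validate_fund_code(s: str) -> bool:
    return bool(re.fullmatch(r"[0-9]{6}", s))

assert validate_fund_code("000001")
assert not validate_fund_code("0000011")  # 7 digits: re.match would accept this
assert not validate_fund_code("00001a")   # non-digit rejected
```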
Given a set of field distances and a data model, return the indices of the record pairs in order of uncertainty. For example, the first index corresponds to the record pair where we have the least certainty whether the pair are duplicates or distinct. | def findUncertainPairs(field_distances, data_model, bias=0.5):
probability = core.scorePairs(field_distances, data_model)
p_max = (1.0 - bias)
logging.info(p_max)
informativity = numpy.copy(probability)
informativity[probability < p_max] /= p_max
informativity[probability >= p_max] = (1 - pro... | [
"def _get_pair_list(queries, docs, labels, _make_indexed):\n while True:\n j=0\n for q, doc, label in zip(queries, docs, labels):\n doc, label = (list(t) for t in zip(*sorted(zip(doc, label), reverse=True)))\n for item in zip(doc, label):\n if item[1] == 1:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
saves the target actor and critic models | def save_models(self, fname):
torch.save(self.target_actor.state_dict(), str(fname) + '_actor.pt')
torch.save(self.target_critic.state_dict(), str(fname) + '_critic.pt')
print('Models saved successfully') | [
"def save(self, name) -> None:\n self.actor_model.save_weights('actor' + name)\n self.critic_model.save_weights('critic' + name)",
"def saveModelParams(self):\n for i, agent in enumerate(self.agents):\n torch.save(agent.actor_local.state_dict(), f\"actor_agent_{i}.pth\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
loads the target actor and critic models, and copies them onto the actor and critic models | def load_models(self, fname):
self.actor.load_state_dict(torch.load(str(fname) + '_actor.pt'))
self.critic.load_state_dict(torch.load(str(fname) + '_critic.pt'))
self.hard_update(self.target_actor, self.actor)
self.hard_update(self.target_critic, self.critic)
print('Models loaded... | [
"def load(self, name) -> None:\n self.actor_model.load_weights('actor' + name)\n self.critic_model.load_weights('critic' + name)",
"def _construct_actor_critic(self):\n # Main actor and critic networks\n self.actor = Actor(self.sess, **self.actor_param)\n self.critic = Critic(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gaussian Naive Bayes suits the data, and the data is fit using the classifier. No hyperparameters are involved here. The Naive Bayes utility from sklearn is used. | def run_naive_bayes(self):
nb_classifier = GaussianNB() # Initialize the classifier with a kernel
nb_classifier.fit(self.X_train, self.y_train.ravel()) # Fit the training data
y_pred = nb_classifier.predict(self.X_test) # Predict the results on testing data and the classifier
self.pr... | [
"def naiveBayes(self):\n\n # Initialize our classifier\n name = \"NaiveBayes\"\n gnb = GaussianNB()\n\n # Train our classifier\n model = gnb.fit(self.X_train, self.y_train)\n\n # Make predictions\n preds = gnb.predict(self.X_test)\n\n print(\"***** Naive Baye... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
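
A self-contained sketch of the fit/predict flow described above, run on the iris dataset since the original `X_train`/`y_train` are not part of this dump:

```python
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = GaussianNB()                 # no hyperparameters to tune, as noted above
clf.fit(X_train, y_train.ravel())  # ravel() is a no-op on 1-D labels
y_pred = clf.predict(X_test)
print(f"accuracy: {accuracy_score(y_test, y_pred):.3f}")
```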
Determines whether a string is nice. | def is_nice_string(string):
return has_three_vowels(string) and has_double_letter(string) and not has_a_forbidden_substring(string) | [
"def test_nice_strings(nice_string):\n assert is_nice(nice_string)",
"def niceness(string):\n\n return contains_three_vowels(string) and contains_at_least_one_letter(string) and not_contain_strings(string)",
"def is_nice(candidate):\n vowels = set(\"aeiou\")\n enough_vowels = len([c for c in candida... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines whether a string has three vowels. | def has_three_vowels(string):
pattern = re.compile(".*([aeiou].*){3,}")
return bool(pattern.match(string)) | [
"def contains_three_vowels(string):\n vovels = 'aeiou'\n return sum([1 if char in vovels else 0 for char in string]) >= 3",
"def has_three_vowels(s):\n count = 0\n for v in 'aeiou':\n count += s.count(v)\n return count >= 3",
"def has_three_consecutive_vowels(word):\n\n return re.search... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines whether a string has a double letter. | def has_double_letter(string):
pattern = re.compile(".*(.)\\1.*")
return bool(pattern.match(string)) | [
"def single_letter(word):\n\tif len(word)==1 and word!='a' and word!='I':\n\t\treturn True\n\treturn False",
"def contains_two_letters(word):\n return contains_dup_letters(word, 2)",
"def not_letter(character: str) -> bool:\n return character not in LETTERS",
"def contains_at_least_one_letter(string):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines whether a string has a forbidden substring. | def has_a_forbidden_substring(string):
return any(forbidden_string in string for forbidden_string in ["ab", "cd", "pq", "xy"]) | [
"def is_only_string(s):\n if not is_string(s):\n return False\n forbidden = [\" \"];\n for i in range(0,10):\n forbidden += str(i);\n return not any(el in s for el in forbidden)",
"def substring_check(self, str1, str2):\n return self.sanitize(str1) in self.sanitize(str2) or self.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
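Taken together, the three predicates above implement the classic "nice string" check (Advent of Code 2015, day 5 style). A compact, self-contained version with the known example strings:

```python
import re

def is_nice(s: str) -> bool:
    three_vowels = len(re.findall(r"[aeiou]", s)) >= 3
    double_letter = re.search(r"(.)\1", s) is not None
    forbidden = any(bad in s for bad in ("ab", "cd", "pq", "xy"))
    return three_vowels and double_letter and not forbidden

assert is_nice("ugknbfddgicrmopn")        # three vowels, double "d", nothing forbidden
assert not is_nice("jchzalrnumimnmhp")    # no double letter
```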
Create an iterator between two dates (both endpoints included). Reference | def daterange(date1, date2):
for n in range(int((date2 - date1).days) + 1):
yield date1 + dt.timedelta(n) | [
"def _date_range(start_date, end_date):\n\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)",
"def daterange(start_date, end_date):\n\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)",
"def generate(cls, start... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
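Because of the `+ 1`, the generator above includes both endpoints. A self-contained usage sketch:

```python
import datetime as dt

def daterange(date1: dt.date, date2: dt.date):
    for n in range(int((date2 - date1).days) + 1):
        yield date1 + dt.timedelta(n)

print(list(daterange(dt.date(2024, 1, 1), dt.date(2024, 1, 3))))
# [datetime.date(2024, 1, 1), datetime.date(2024, 1, 2), datetime.date(2024, 1, 3)]
```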
search the GDAS1 meteorological files that can cover the time span from `start_time` to `stop_time`. | def _search_GDAS1_meteor_file(self, start_time, stop_time, *args,
meteor_dir):
def gdas1_fname_from_date(time):
"""
Return the GDAS1 filename with the given datetime.
"""
months = {
1: 'jan', 2: 'feb', 3: 'mar', ... | [
"def search_vulcan_runs(record_data, start_time, end_time):",
"def check_recording_times(self, activity_dir):\n self.get_dwell_times(activity_dir)\n scans = self.collector.scaninfo.keys()\n scans.sort()\n tol = datetime.timedelta(seconds=6)\n tablekeys = self.tables.keys()\n tablekeys.sort()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the GDAS1 filename with the given datetime. | def gdas1_fname_from_date(time):
months = {
1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5: 'may', 6: 'jun',
7: 'jul', 8: 'aug', 9: 'sep', 10: 'oct', 11: 'nov', 12: 'dec'
}
week_no = ((time.day - 1) // 7) + 1
# determine the current 7 days... | [
"def filename_with_timestamp(base_name):\n return base_name + FileUtils.timestamp_string() + \".jpg\"",
"def get_sounding_file_name(sonde_path, sonde_name, time):\n year_str = \"%04d\" % time.year\n month_str = \"%02d\" % time.month\n day_str = \"%02d\" % time.day\n hour_str = \"%02d\" % time.h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
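The helper above is cut off after the week calculation. GDAS1 archives on the NOAA ARL server are named like `gdas1.jun21.w3` (month abbreviation, two-digit year, week-of-month), so a plausible completion is:

```python
import datetime as dt

def gdas1_fname_from_date(time: dt.datetime) -> str:
    """Return the GDAS1 filename for the given datetime (a reconstruction)."""
    months = {1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5: 'may', 6: 'jun',
              7: 'jul', 8: 'aug', 9: 'sep', 10: 'oct', 11: 'nov', 12: 'dec'}
    week_no = ((time.day - 1) // 7) + 1          # the 7-day slot within the month
    return 'gdas1.{m}{y:02d}.w{w}'.format(m=months[time.month],
                                          y=time.year % 100, w=week_no)

print(gdas1_fname_from_date(dt.datetime(2021, 6, 21)))   # gdas1.jun21.w3
```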
run HYSPLIT ensemble trajectory. | def run_HYSPLIT_ens(self):
# prepare the SETUP.CFG
setupFile = os.path.join(self.hysplit_working_dir, 'SETUP.CFG')
with open(setupFile, 'w', encoding='utf-8') as fh:
fh.writelines("""
&SETUP
KMSL=0,
tm_rain=1,
tm_tpot=0... | [
"def train_split_run(args):\n # pylint: disable=unused-argument\n from .prepare.train_split import run\n\n run(equal_splits=args.equalsplits)",
"def run_HYSPLIT_list(self, taskFile, *args,\n meteor_dir='', mode='ens', station=\"wuhan\"):\n\n if (not os.path.exists(taskFile)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run HYSPLIT with the given task list file. | def run_HYSPLIT_list(self, taskFile, *args,
meteor_dir='', mode='ens', station="wuhan"):
if (not os.path.exists(taskFile)) or (not os.path.isfile(taskFile)):
logger.warning('{} does not exist.'.format(taskFile))
raise FileNotFoundError
# read taskFile
... | [
"def run_HYSPLIT_ens(self):\n\n # prepare the SETUP.CFG\n setupFile = os.path.join(self.hysplit_working_dir, 'SETUP.CFG')\n with open(setupFile, 'w', encoding='utf-8') as fh:\n fh.writelines(\"\"\"\n &SETUP\n KMSL=0,\n tm_rain=1,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
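The per-line task format is elided above, but the guard-and-read pattern is clear. A generic sketch (the function name is hypothetical and the parsing is left as a placeholder):

```python
import logging
import os

logger = logging.getLogger(__name__)

def read_task_file(task_file: str):
    if (not os.path.exists(task_file)) or (not os.path.isfile(task_file)):
        logger.warning('%s does not exist.', task_file)
        raise FileNotFoundError(task_file)
    with open(task_file, encoding='utf-8') as fh:
        # Each non-empty line is one trajectory task; its fields are
        # format-specific and not shown in the original snippet.
        return [line.strip() for line in fh if line.strip()]
```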
Return the fidelity degree of a qubit. F_degree_Qi = lambda * sum(1 - E[Qi][Qj]) + (1 - R_Qi), where Qj are the neighbor qubits of Qi. | def qubit_fidelity_degree(qubit_index: int,
hardware: IBMQHardwareArchitecture,
cnot_error_matrix: np.ndarray,
readout_error: ty.List,
weight_lambda: int,):
degree = 0.0
for neighbour in hardware.neighbors(qubit_index):
degree += (1 - c... | [
"def degree_on_basis(self, I):\n return I.size()",
"def dmp_degree_in(f, j, u):\n if not j:\n return -oo if dmp_zero_p(f, u) else len(f) - 1\n\n if j < 0 or j > u:\n raise IndexError(\"0 <= j <= %s expected, got %s\" % (u, j))\n\n def degree_in(g, v, i, j):\n if i == j:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
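The loop body above is truncated; completing it per the docstring's formula gives the sketch below. The standalone `neighbors` argument replaces `hardware.neighbors(qubit_index)` so the example runs without the `IBMQHardwareArchitecture` class.

```python
import typing as ty
import numpy as np

def qubit_fidelity_degree(qubit_index: int,
                          neighbors: ty.Iterable[int],
                          cnot_error_matrix: np.ndarray,
                          readout_error: ty.List[float],
                          weight_lambda: float) -> float:
    # F_degree(Qi) = lambda * sum_j (1 - E[Qi][Qj]) + (1 - R[Qi])
    degree = sum(1 - cnot_error_matrix[qubit_index][j] for j in neighbors)
    return weight_lambda * degree + (1 - readout_error[qubit_index])
```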
For a given list of qubits, find the qubit with the highest fidelity degree. | def find_best_qubit(qubit_list: ty.List,
hardware: IBMQHardwareArchitecture,
cnot_error_matrix: np.ndarray,
readout_error: ty.List,
weight_lambda: int,):
best_qubit_fidelity_degree = -1
best_qubit = -1
for qubit in qubit_list:
... | [
"def find_all_highest(l, f):\n if len(l) == 1: return l\n maxvalue = max([f(x) for x in l])\n return [x for x in l if f(x) == maxvalue]",
"def find_start_qubit(self,\n qubit_list: List[cirq.Qid],\n depth=3) -> Optional[cirq.GridQubit]:\n best = None\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
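With the fidelity degree in hand, the selection itself reduces to an argmax; a minimal sketch assuming `fidelity_degree` is a callable like the one sketched above:

```python
def find_best_qubit(qubit_list, fidelity_degree):
    # Ties resolve to the first qubit encountered, matching a plain max().
    return max(qubit_list, key=fidelity_degree)
```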
Find the qubit to merge into the partition. First, we choose the best qubit (highest fidelity degree) of the current partition. Second, from the neighbour qubits of the best qubit, we choose the one with the highest fidelity degree to merge into the partition. | def find_qubit(partition: ty.List,
hardware: IBMQHardwareArchitecture,
cnot_error_matrix: np.ndarray,
readout_error: ty.List,
weight_lambda: int,):
partition = sorted(partition, key=lambda x: qubit_fidelity_degree(x, hardware, cnot_error_matrix, readout_e... | [
"def find_start_qubit(self,\n qubit_list: List[cirq.Qid],\n depth=3) -> Optional[cirq.GridQubit]:\n best = None\n best_count = -1\n for q in qubit_list:\n c = self.qubits_within(depth, q, qubit_list, set())\n if c > best_coun... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the physical node degree of each physical qubit and the largest physical node degree. | def hardware_qubit_physical_degree(hardware: IBMQHardwareArchitecture):
qubit_degree = defaultdict(list)
largest_physical_degree = 0
for num in range(hardware.qubit_number):
degree = hardware.degree(num) / 2
if degree > largest_physical_degree:
largest_physical_degree = degree
... | [
"def degree(self,u):\n return len(self.get_node(u))",
"def qubit_fidelity_degree(qubit_index: int,\n hardware: IBMQHardwareArchitecture,\n cnot_error_matrix: np.ndarray,\n readout_error: ty.List,\n weight_lambda: int,):\n degree = 0.0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
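A sketch of the truncated tail: degrees are halved because the coupling graph stores each edge in both directions, and the return shape (a dict from degree to qubit list, plus the maximum) is an assumption consistent with how `starting_point_heuristic` below indexes it.

```python
from collections import defaultdict

def hardware_qubit_physical_degree(hardware):
    qubit_degree = defaultdict(list)
    largest_physical_degree = 0
    for num in range(hardware.qubit_number):
        degree = hardware.degree(num) / 2      # each coupling is stored twice
        largest_physical_degree = max(largest_physical_degree, degree)
        qubit_degree[degree].append(num)
    return qubit_degree, largest_physical_degree
```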
Iterate over all the gates of the circuit and obtain the largest logical node degree among the logical qubits. | def largest_circuit_logical_degree(circuit: QuantumCircuit):
logical_qubit_degree = defaultdict(list)
qasm_file = circuit.qasm().split(';')
for line in qasm_file:
line = line.split()
if not line:
continue
if line[0] == 'OPENQASM':
continue
if line[0] =... | [
"def find_max_independent_set(graph, params):\n\n max_ind_set = []\n\n # QHACK #\n from pennylane import qaoa\n\n dev = qml.device(\"default.qubit\", wires=NODES)#, analytic=True, shots=2)\n \n #pauli_z = [[1, 0], [0, -1]]\n #pauli_z_6 = np.kron(pauli_z, np.kron(pauli_z,np.kron(pauli_z,np.kr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If largest_physical_degree < largest_logical_degree, the set of physical qubits with the largest physical node degree is collected as the list of starting points. Else, the physical qubits whose physical node degree is not less than the largest logical node degree are collected as starting points. | def starting_point_heuristic(hardware_qubit_physical_degree: ty.Dict,
largest_physical_degree: float,
largest_logical_degree: int):
staring_points = []
if largest_physical_degree < largest_logical_degree:
return hardware_qubit_physical_degree[lar... | [
"def hardware_qubit_physical_degree(hardware: IBMQHardwareArchitecture):\n qubit_degree = defaultdict(list)\n largest_physical_degree = 0\n for num in range(hardware.qubit_number):\n degree = hardware.degree(num) / 2\n if degree > largest_physical_degree:\n largest_physical_degree ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
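Completing the heuristic's else-branch (a sketch; the original tail is truncated): every qubit whose physical degree reaches the largest logical degree becomes a starting point.

```python
import typing as ty

def starting_point_heuristic(qubit_physical_degree: ty.Dict[float, ty.List[int]],
                             largest_physical_degree: float,
                             largest_logical_degree: int) -> ty.List[int]:
    if largest_physical_degree < largest_logical_degree:
        return qubit_physical_degree[largest_physical_degree]
    starting_points = []
    for degree, qubits in qubit_physical_degree.items():
        if degree >= largest_logical_degree:
            starting_points.extend(qubits)
    return starting_points
```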
Check if dunder method call is within a dunder method definition. | def within_dunder_def(node: nodes.NodeNG) -> bool:
parent = node.parent
while parent is not None:
if (
isinstance(parent, nodes.FunctionDef)
and parent.name.startswith("__")
and parent.name.endswith("__")
):
return T... | [
"def is_method(self, line):\n # We only want the first token in the line, to avoid false positives.\n # That is, the word 'def' appearing in some other context.\n tokens = line.split()\n if tokens:\n first_token = tokens[0]\n return first_token == 'def'\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
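The walk above is cut off at the first `return`; completed, it climbs `parent` links until it finds a dunder `FunctionDef` or runs out of ancestors. A self-contained version with a quick astroid check:

```python
import astroid
from astroid import nodes

def within_dunder_def(node: nodes.NodeNG) -> bool:
    parent = node.parent
    while parent is not None:
        if (isinstance(parent, nodes.FunctionDef)
                and parent.name.startswith("__")
                and parent.name.endswith("__")):
            return True
        parent = parent.parent
    return False

module = astroid.parse("class A:\n    def __init__(self):\n        print(1)\n")
call = next(module.nodes_of_class(nodes.Call))
print(within_dunder_def(call))   # True: the call sits inside __init__
```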
Depending on the needs, create multiple instances of the same PipeModule; more instances will be initialized as needed. | def _create_instances(self):
#initialize the module
_instance = self._module()
self._instance_list = [_instance] | [
"def __init__(self):\r\n self.configureLogging()\r\n logging.info(\"Starting pipeline\")\r\n pool = self.parseArguments()\r\n for lib in pool.libs:\r\n self.preprocess(lib) \r\n \r\n self.doGenomeSizeEstimation(pool)\r\n \r\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the output of each frame equals the input of the next frame; return a tuple of (True or False, ...) | def check_frame_compat(self):
for i in range(self.num_of_frames - 1):
#get the dims
first_output_dims = self._frames[i]._frame_output_dims
second_input_dims = self._frames[i+1]._frame_input_dims
#need to return all non-matching frames
if not first_... | [
"def verify(h1, h2, p):\n s1 = get_spectrum(h1, p)\n s2 = get_spectrum(h2, p)\n return(s1 == s2)",
"def is_request_and_response_pair(request, response):\n if not isinstance(request, HTTPCommunicationModel) or \\\n not isinstance(response, HTTPCommunicationModel):\n return False\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
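A sketch completing the truncated walk: collect every adjacent pair whose dims disagree and return them alongside the overall verdict (the exact return shape is an assumption; the attribute names follow the snippet above).

```python
def check_frame_compat(frames):
    mismatches = []
    for i in range(len(frames) - 1):
        first_output_dims = frames[i]._frame_output_dims
        second_input_dims = frames[i + 1]._frame_input_dims
        if first_output_dims != second_input_dims:
            mismatches.append((i, i + 1))
    return (len(mismatches) == 0, mismatches)
```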
Update the participant's last_{ach,bill}_result in the DB. Also update receiving amounts of the participant's tippees. | def store_result(db, thing, participant, new_result):
assert thing in ("credit card", "bank account"), thing
column = 'last_%s_result' % ('bill' if thing == 'credit card' else 'ach')
old_result = getattr(participant, column)
# Update last_thing_result in the DB
db.run("""
UPDATE participant... | [
"def update_recurring_bill_success(self):\n\t\tself.last_bill_date = datetime.now()\n\t\tself.last_payment_failed = False\n\t\tself.last_payment_state = 'P' # PAID_STATE\n\t\tself.save()",
"def process_result(self, winner, loser):\n competitors = self.db['competitors']\n losers = self.db['losers']\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
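The UPDATE the snippet starts to build is truncated; below is a hedged sketch of the first half only (the tippee-amount update is omitted, the table and column names follow the visible code, and the column is interpolated only from the asserted whitelist, so it is not injectable):

```python
def update_last_result(db, participant, thing, new_result):
    assert thing in ("credit card", "bank account"), thing
    column = 'last_%s_result' % ('bill' if thing == 'credit card' else 'ach')
    db.run("""
        UPDATE participants
           SET {} = %s
         WHERE username = %s
    """.format(column), (new_result, participant.username))
```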
Given four unicodes, return a unicode. This function attempts to associate the credit card or bank account details referenced by balanced_thing_uri with a Balanced Account. If it fails we log and return a unicode describing the failure. Even for failure we keep balanced_customer_href; we don't reset it to None/NULL. It... | def associate(db, thing, participant, balanced_account, balanced_thing_uri):
typecheck( participant, Participant
, balanced_account, balanced.Customer
, balanced_thing_uri, unicode
, thing, unicode
)
invalidate_on_balanced(thing, balanced_account)
try:
... | [
"def format_account(service_name, data):\n\n if \"username\" not in data:\n raise KeyError(\"Account is missing a username\")\n\n account = {\n \"@type\": \"Account\",\n \"service\": service_name,\n \"identifier\": data[\"username\"],\n \"proofType\": \"http\"\n }\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the DB backend used to create this dataset | def get_backend(self):
raise NotImplementedError('Please implement me') | [
"def create_backend(self):\n backend_cls = self.get_backend_cls()\n return backend_cls(connection=self)",
"def get_backend():\n\tconf = settings.STORAGE_BACKEND\n\tname, options = conf[\"name\"], conf[\"options\"]\n\n\treturn backends[name](options)",
"def preferred_backend():\n return _preferr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
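The raise-NotImplementedError pattern above marks an abstract hook; the same contract can be made explicit with `abc`, at the cost of preventing direct instantiation of the base class (a stylistic alternative, not the original code):

```python
from abc import ABC, abstractmethod

class Dataset(ABC):
    @abstractmethod
    def get_backend(self) -> str:
        """Return the DB backend used to create this dataset."""
```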