query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | negatives (list of 19 to 20 items) | metadata (dict) |
|---|---|---|---|
Iterator yielding one HyperLogLog.hmap per sequence in the given iterable. list_of_sequences: an iterable of iterables. | def compute_hmaps(list_of_sequences):
for sequence in list_of_sequences:
hll = HLL.HyperLogLog64(k)
hll.extend(sequence)
yield hll | [
"def init_compute_hmaps(k):\n def compute_hmaps(list_of_sequences):\n \"\"\"\n Iterator yielding 1 HyperLogLog.hmap per sequence in given iterable\n \n list_of_sequences - iterable of iterable\n \"\"\"\n for sequence in list_of_sequences:\n hll = HLL.HyperLogL... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an estimate of the number of distinct elements in items. items: a sequence of elements. k: number of hash functions. spark_context: a Spark context. | def estimate_distinct_elements_parallel(lists_of_items, k, spark_context):
hll = spark_context.parallelize(lists_of_items) \
.mapPartitions(init_compute_hmaps(k)) \
.reduce(lambda x, y :x + y)
return hll.cardinality | [
"def estimate_distinct_elements(items, k):\n hll = HLL.HyperLogLog64(k)\n hll.extend(items)\n return hll.cardinality",
"def get_count_distinct_user():\n count_distinct_users = rdd_review_data\\\n .map(lambda x: x[\"user_id\"])\\\n .distinct()\\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
| The seller's geocode | def geocode(self):
return self.__geocode | [
"def geocode(self, recode=False):\n if not self.lat or not self.long or recode:\n # get the geocoordinates for the adress\n # TODO log geocodings into the db\n g = geocoders.Google(settings.GOOGLE_API_KEY)\n adr = '%s, %s %s, %s' % (self.street, self.zipcode, self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
| Invoice number of the payment | def invoice_number(self):
return self.__invoice_number | [
"def generate_invoice_no(self):\n part1 = self.compName\n print(\"CNAME:\", str(part1))\n cname = str(part1)\n cnamehalf = cname[:3]\n t = datetime.datetime.now()\n t = t.strftime('%m%d%Y%H%M%S')\n par = str(t)\n part2 = str(par)\n aaa = cnamehalf.upper... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a key for `COURSIER_CACHE` determined by the configured repositories. This helps us avoid a cache poisoning issue that we uncovered in 14577. | def _coursier_cache_prefix(self) -> str:
sha = sha256()
for repo in self.repos:
sha.update(repo.encode("utf-8"))
return sha.digest().hex() | [
"def _get_cache_key(self, **kwargs):\n key = 'cartodb_%s_' % _geohash.encode(\n kwargs.pop('lat'), kwargs.pop('lon'))[:8]\n key += '_'.join([\n '%s=%s' % (k, kwargs[k]) for k in sorted(kwargs.iterkeys())])\n return key",
"def get_cache_key(self):\n\n return self.c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the range of the dose levels of a guideline drugUsagesSlo | def printDoseLevelRange(outputDestination, doseLevel):
ll = doseLevel.getOwnSlotValue(kb.getSlot("lower_limit"))
ul = doseLevel.getOwnSlotValue(kb.getSlot("upper_limit"))
av = doseLevel.getOwnSlotValue(kb.getSlot("abstract_value"))
if (ll):
llString = roundFloat(ll,1)
else:
ll... | [
"def test_get_drillstrings(self):\n drillstrings = self.corva.get_drillstrings(self.well_name)\n self.assertListEqual([drillstring.start_depth for drillstring in drillstrings], [15735, 9404, 8399, 6314, 1])",
"def levelOfDetailRange(self): # real signature unknown; restored from __doc__\n pas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Importable function that accepts a prompt and a time (in seconds). This function waits for an input and returns an empty string if a TimeoutExpired exception is raised. If an input is made before the set timer expires, the function returns the input. | def timed_input(prompt='', timer=10):
try:
answer = __input_with_timeout(prompt, timer)
except TimeoutExpired:
return ''
else:
return answer | [
"def ask_time():\n # get time spent on task in minutes\n while True:\n user_time = input(\"Please enter the time spent on task in minutes >\")\n if checkers.return_int(user_time):\n output = checkers.return_int(user_time)\n break\n return output",
"def pause(msg=\"\",t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle all nodes of the listed node classes. | def process(self):
for node_class in self.setup["node_classes"]:
for node in nuke.allNodes(recurseGroups=True):
class_name = node.Class()
if class_name != node_class:
continue
self.logger.info("%s '%s' because its node class (%s) i... | [
"def node_classes(self):\n node_classes = []\n for node_type, node_attrs in data.iteritems():\n node_class = node_attrs.get('class', None)\n \n if node_class and node_class not in node_classes:\n node_classes.append(node_class)\n return sorted(nod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tuples of (sample, read group) names. | def unit_names(self):
return [UnitName(sample.name, rg.name)
for sample in self.samples.values()
for rg in sample.read_groups.values()] | [
"def sample_names(self):\n return self._sample_names",
"def get_read_group(wildcards):\n return r\"-R '@RG\\tID:{run}\\tSM:{sample}-{condition}\\tPL:{platform}'\".format(\n sample=wildcards.sample,\n condition=wildcards.condition,\n run=units.loc[(wildcards.sample, wildcards.unit, w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the given setting key if it has not yet been set. | def set_default_setting(self, key, value):
if self.settings.get(key) is None:
self.settings[key] = value | [
"def set_value(parameter, key, value):\n if not key in parameter:\n parameter[key] = value",
"def SetKeyword(key, value):",
"def set(self, workflow_id: str, key: str, value: Optional[str]) -> None:\n raise NotImplementedError",
"def direct_set(self, key: str, value):\n set_store_value(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the working directory of the run. A default value can optionally be given if the ``settings.workdir`` key is not defined in the config. | def get_workdir(self, default=None):
return getnattr(self._raw, ["settings", "workdir"], default) | [
"def _driver_workdir(self):\n return self.config.get('workdir', '/tmp')",
"def get_worker_working_directory():\n working_directory = \"tmp/netSLS\"\n if _CONFIG_PARSER.has_option(\"DEFAULT\", \"WorkerWorkingDirectory\"):\n working_directory = _CONFIG_PARSER.get(\"DEFAULT\", \"WorkerWorkingDire... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test VersionSelector().pick_winner() with "None" versions. | def test_basic_selector_none_versions(unsupported_cve_none_versions):
candidates = [
PackageNameCandidate('io.vertx:testtools', Decimal('10.0')),
]
selector = VersionSelector(unsupported_cve_none_versions, candidates, 'java')
winner = selector.pick_winner() # don't throw TypeError here
asse... | [
"def test_none():\n ver = _version.Version(\"\", True, 8)\n assert_equals(unicode(ver), u'0.0.0-dev-r8')\n\n ver = _version.Version(\"\", False, 9)\n assert_equals(unicode(ver), u'0.0.0')",
"def test_election_winners_0_votes() -> None:\n e = Election(date(2000, 2, 8))\n e.update_results('r1', 'n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
open connection with postgres | def open(self):
conn_string = f"host={self.host} user={self.user} password={self.password} dbname={self.dbname} port={self.port}"
try:
self.conn = psycopg2.connect(conn_string)
print("POSTGRES::Connection established")
except Exception as e:
print(str(e)) | [
"def setup_postgres():\n conn = psycopg2.connect(\"postgresql://python:{}@{}:5432/kin\".format(PYTHON_PASSWORD, POSTGRES_HOST))\n logging.info('Successfully connected to the database')\n return conn",
"def init_postgresql_connection():\n connection = connect(user='test',\n pass... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fetch_pdb_chain_uniprot with existing file | def test_fetch_pdb_chain_existing_file_pass(self):
success_msg = re.compile(
r'^Found local copy of.*',
re.IGNORECASE
)
chain_fp = os.path.join(
self.test_data_dp,
'data',
'initial_filtering_data',
'tsv_data',
'... | [
"def read_lookup_table(filename):\n\n pdb_id_file = open(filename, \"r\")\n uniprot_pdb_dict = {}\n for line in pdb_id_file:\n pdb_id = str(line[7:-1])\n uniprot_id_for_dict = str(line[:-6])\n uniprot_pdb_dict.setdefault(uniprot_id_for_dict,[]).append(pdb_id)\n\n return uniprot_pdb_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Apply the JordanWigner transform to a FermionOperator, InteractionOperator, or DiagonalCoulombHamiltonian to convert to a QubitOperator. | def jordan_wigner(operator):
if isinstance(operator, FermionOperator):
return _jordan_wigner_fermion_operator(operator)
if isinstance(operator, MajoranaOperator):
return _jordan_wigner_majorana_operator(operator)
if isinstance(operator, DiagonalCoulombHamiltonian):
return _jordan_wig... | [
"def convert_wpo_and_openfermion(operator):\n def _count_qubits(openfermion_operator):\n \"\"\" Counts the number of qubits in the openfermion.operator\"\"\" \n nb_qubits = 0\n for sett, coef in openfermion_operator.terms.items():\n if len(sett)>0:\n nb_qubits = max... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the base Pool Info | def test_01_Base(self):
l_pool = poolXml._read_base(self.m_xml.pool)
# print(PrettyFormatAny.form(l_pool, 'R1-01-A - Pool'))
self.assertEqual(l_pool.Name, TESTING_POOL_NAME_0)
self.assertEqual(l_pool.Key, int(TESTING_POOL_KEY_0))
self.assertEqual(str(l_pool.Active), TESTING_POOL_... | [
"def _getpool():\n logging.info(\n \"Creating a process pool with pool size {processes} (the number of CPU cores)...\".format(\n processes=os.cpu_count() or 1))\n return Pool()",
"def _get_pool_object(self):\n\n pool = [{\"status\": \"ACTIVE\",\n \"lb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write pool base info | def test_01_Base(self):
# print(PrettyFormatAny.form(self.m_pools[0], 'W1-01-A - Pools'))
l_xml = poolXml._write_base(self.m_pools[0])
# print(PrettyFormatAny.form(l_xml, 'W1-01-B - Pool'))
self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)
self.assertEqual(l_xml.attrib[... | [
"def test_01_Base(self):\n l_pool = poolXml._read_base(self.m_xml.pool)\n # print(PrettyFormatAny.form(l_pool, 'R1-01-A - Pool'))\n self.assertEqual(l_pool.Name, TESTING_POOL_NAME_0)\n self.assertEqual(l_pool.Key, int(TESTING_POOL_KEY_0))\n self.assertEqual(str(l_pool.Active), TES... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write one entire pool XML | def test_02_Pool0(self):
l_xml = poolXml._write_one_pool(self.m_pools[0])
# print(PrettyFormatAny.form(l_xml, 'W1-02-A - Pool 0'))
self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)
self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)
self.assertEqual(l_xml.attrib['... | [
"def test_03_Pool1(self):\n l_xml = poolXml._write_one_pool(self.m_pools[1])\n # print(PrettyFormatAny.form(l_xml, 'W1-03-A - Pool 1'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_1)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_1)\n self.assertEqual(l_xm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write one entire pool XML | def test_03_Pool1(self):
l_xml = poolXml._write_one_pool(self.m_pools[1])
# print(PrettyFormatAny.form(l_xml, 'W1-03-A - Pool 1'))
self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_1)
self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_1)
self.assertEqual(l_xml.attrib['... | [
"def test_02_Pool0(self):\n l_xml = poolXml._write_one_pool(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-02-A - Pool 0'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write Pool Section with all pools. | def test_04_AllPools(self):
l_xml, l_count = poolXml.write_all_pools_xml(self.m_pyhouse_obj)
# print(PrettyFormatAny.form(l_xml, 'W1-04-A - Pool'))
# l_xml1 = l_xml.find('Pool')
l_xml2 = l_xml[0]
self.assertEqual(l_xml2.attrib['Name'], TESTING_POOL_NAME_0)
self.assertEqua... | [
"def writeAssignments(hashring, filename):\n logging.debug(\"Dumping pool assignments to file: '%s'\" % filename)\n\n try:\n with open(filename, 'a') as fh:\n fh.write(\"bridge-pool-assignment %s\\n\" %\n time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n hashring.dumpAss... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a key, return an item from the cache. Return None if the item is not in the cache. | def get(self, key):
#return none if the item isn't in the cache
if key not in self.items:
return None
#retrieve the item from the dictionary
item = self.items[key]
#move it to the front of the list since it is the
#most recently accessed item
self._m... | [
"def __getitem__(self, key):\n\n # check for slycat path\n self.check_fs_path()\n\n # is item in cache?\n if key in self:\n\n # get hash and value\n digest = self.digest_hash(key)\n value = self._loaded[digest].value\n expired = self._loaded[digest].expired()\n\n # if expired, e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a key and a value, add an item to the cache. If the cache is full, the least recently used item will be evicted. | def put(self, key, value):
#first check if item in already in the cache
item = self.items.get(key, None)
#if not create a new item
if item is None:
#if the cache is full, evict the last item
if self.is_full():
self._evict()
item = Cach... | [
"def add(self, key, value):\n self.m_cache[key] = [self.m_time, key, value]\n self.m_time += 1\n self.expire()",
"def add_to_cache(self, key, value):\n self._cache_data[key] = value\n self._dirty = True",
"def add(self, key, value):\n key = self.get_key(key)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert the given item to the front of the linked list. | def _push_front(self, item):
#point the item's previous pointer to head and its
#next pointer to the item after the head
item.prev = self.head
item.next = self.head.next
#the item is still not fully in the linked list yet
#point the item after the head's previous pointer... | [
"def enqueue_front(self, item):\n self.list.append(item)",
"def insert_front(self, data):\n node = ListNode(data)\n if self.head:\n node.next = self.head\n self.head = node",
"def enqueue_back(self, item):\n self.list.prepend(item)",
"def insert_before(self, item)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Applies all contained diffs to the given file_dict. This operation will be done inplace. | def apply(self, file_dict: dict):
for filename, diff in self.diffs.items():
file_dict[filename] = diff.modified | [
"def update(self, files_dict):\n\n # take the FolderProcessor's result and compare it to the current DB",
"def diff(self) -> None:\n\n # We do not write to the translated files directly.\n self.copy_files()\n new_file = dict()\n old_file = dict()\n i = 0\n for old_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if the result overlaps with source ranges provided. | def overlaps(self, ranges):
if isinstance(ranges, SourceRange):
ranges = [ranges]
for range in ranges:
for self_range in self.affected_code:
if range.overlaps(self_range):
return True
return False | [
"def overlap(r1: Rule, r2: Rule):\n if max(r1.src[0], r2.src[0]) > min(r1.src[1], r2.src[1]):\n return False\n if max(r1.dst[0], r2.dst[0]) > min(r1.dst[1], r2.dst[1]):\n return False\n return True",
"def overlaps(self, other):\n return not (self.start > other.end or self.end < other... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempts to return a metric from an Apache Beam PipelineResults. | def get_pipeline_metric(pipeline_results, metric_name, index=0,
result_type='counters'):
metrics_filter = MetricsFilter().with_name(metric_name)
query_result = pipeline_results.metrics().query(metrics_filter)
try:
return query_result[result_type][index].committed
except IndexError:
... | [
"def get_metrics_dict(results):\n raise NotImplementedError",
"def publish_metrics(\n self, result: PipelineResult, extra_metrics: Optional[dict] = None):\n metric_id = uuid.uuid4().hex\n metrics = result.metrics().query(self.filters)\n\n # Metrics from pipeline result are stored in map with ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable logging to Humio if the `token` and `dataspace` are set in the `secrets` payload. | def configure_control(configuration: Configuration, secrets: Secrets):
token = secrets.get("humio", {}).get("token", "").strip()
if not token:
logger.debug("Missing Humio token secret")
with_logging.enabled = False
return
dataspace = secrets.get("humio", {}).get("dataspace", "").str... | [
"def issue_influxdb_token(self, token_data: TokenData) -> str:\n secret = self._config.influxdb_secret\n if not secret:\n raise NotConfiguredException(\"No InfluxDB issuer configuration\")\n if self._config.influxdb_username:\n username = self._config.influxdb_username\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Always write usage_stats.json regardless of report success/failure. If the report fails, the error message should be written to usage_stats.json. If the file write fails, the error will just stay in dashboard.log and usage_stats.json won't be written. | def _report_usage_sync(self):
if not self.usage_stats_enabled:
return
try:
self._fetch_and_record_extra_usage_stats_data()
data = ray_usage_lib.generate_report_data(
self.cluster_config_to_report,
self.total_success,
s... | [
"def testReportFileWriteOutAfterEachSuiteReportReport(self):\n self.reporter.SetReportFile(self.file_name)\n self.reporter.SuiteReport('PassOnReportFile',\n constants.PASS,\n 'suite line')\n self.assertTrue(os.path.isfile(self.file_name) and\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a time in clocks to a time in s | def clocks_to_s(time, clock_cycle=20e-9):
return time*clock_cycle | [
"def clocks_to_s(time, clock_cycle=20e-9):\n return time * clock_cycle",
"def convert_time_to_seconds(time_string):\n if time_string[-1] == 's':\n return int(time_string[:-1])\n else:\n denominations = [int(t) for t in time_string.split(':')]\n converts = [60**i for i in reversed(ran... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the expected tqisa filename based on the qisa filename. | def infer_tqisa_filename(qisa_fn: str):
return qisa_fn[:-4]+'tqisa' | [
"def test_source_ttf_font_filename_equals_familystyle(self):\n ttfont = Font.get_ttfont(self.operator.path)\n\n style_name = ttfont.stylename\n if style_name == 'Normal' or style_name == 'Roman':\n style_name = 'Regular'\n\n expectedname = '{0}-{1}'.format(ttfont.familyname.re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in a line of a tqisa file and returns the starting time. This corrects for the timing in the "bs" instruction. Time is in units of clocks. | def get_start_time(line: str):
start_time = int(line.split(':')[0])
if 'bs' in line:
# Takes the second character after "bs"
pre_interval = int(line.split('bs')[1][1])
start_time += pre_interval
return start_time | [
"def get_start_time(self):\n with open(self.fp_file, 'r') as f:\n lines = f.readlines()\n starttime = 999999999999\n for x in lines:\n if 'TRACK_TIME' in x:\n ttemp = float(x[11:])\n starttime = min(starttime, ttemp)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the map for the smis and smit qubit registers from a qisa file | def get_register_map(qisa_fn: str):
reg_map = {}
with open(qisa_fn, 'r') as q_file:
linenum = 0
for line in q_file:
if 'start' in line:
break
if 'smis' in line or 'smit' in line:
reg_key = line[5:line.find(',')]
start_reg_id... | [
"def make_rms_map():\n\tpath = '/nfs/slac/g/ki/ki19/deuce/AEGIS/unzip/'\n\tfile_name = path+'seg_ids.txt'\n\tall_seg_ids = np.loadtxt(file_name, delimiter=\" \",dtype='S2')\n\t#all_seg_ids=['01']\n\tfilters = ['f606w', 'f814w']\n\tfor f in filters:\n\t\tfor fl in glob.glob(path+f+'/*_rms.fits'):\n\t\t\tos.remove(fl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes part of an instruction and splits it into a tuple of codeword, target | def split_instr_to_op_targ(instr: str, reg_map: dict):
cw, sreg = instr.split(' ')
target_qubits = reg_map[sreg]
return (cw, target_qubits) | [
"def parse_instruction(self, line):\n instruction, *args = line.strip().replace(',', '').split()\n return instruction, args",
"def get_instructions ():\n try:\n instruction = sys.argv[1]\n try:\n target_id = int (sys.argv[2])\n return (instruction, target_id)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns time tuples of the form (start_time, operation, target_qubits, line_nr) | def get_timetuples(qisa_fn: str):
reg_map = get_register_map(qisa_fn)
tqisa_fn = infer_tqisa_filename(qisa_fn)
time_tuples = []
with open(tqisa_fn, 'r') as tq_file:
for i, line in enumerate(tq_file):
# Get instruction line
if re.search(r"bs", line):
# Get... | [
"def _get_time_info(self, logs):\n hours = timedelta(0)\n tasks = {} # task: timedelta\n\n for entry in logs:\n delta = entry['stop'] - entry['start']\n hours += delta\n if len(entry['task']):\n if entry['task'] in tasks:\n tas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of tuples that perform a specific operation | def get_operation_tuples(time_tuples: list, target_op: str):
op_indices = find_operation_idx_in_time_tuples(time_tuples,
target_op=target_op)
time_tuples_op = []
for op_idx in op_indices:
time_tuples_op.append(time_tuples[op_idx])
return time_t... | [
"def x_ops(ops,L,t):\n result = []\n for j in range(len(ops)):\n op = ops[j]\n result.append('Result of %s'%op)\n result.append(execute_op(op,L,t))\n return result",
"def makeops(op, lists):\n return tuple(l[0] if len(l) == 1 else build(op, l) for l in lists)",
"def _get_ops(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if compilation of a file is needed based on its timestamp and an optional recompile option. The behaviour of this function depends on the recompile argument. | def check_recompilation_needed(program_fn: str, platf_cfg: str,
recompile=True):
if recompile == True:
return True
elif recompile == 'as needed':
try:
if is_more_rencent(program_fn, platf_cfg):
return False
else:
... | [
"def check_recompilation_needed(\n program_fn: str,\n platf_cfg: str,\n recompile=True\n) -> bool:\n if recompile is True:\n return True # compilation is enforced\n elif recompile == 'as needed':\n # In case you ever think of a hash-based check mind that this\n # fun... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is a helper function for running an experiment that is spread over multiple OpenQL programs of varying length, such as GST. Every time the detector is called it will also modify the number of sweep points in the detector. | def load_range_of_oql_programs_varying_nr_shots(programs, counter_param, CC,
detector):
program = programs[counter_param()]
counter_param((counter_param()+1) % len(programs))
CC.eqasm_program(program.filename)
detector.nr_shots = len(program.sweep_points) | [
"def load_range_of_oql_programs_varying_nr_shots(\n programs,\n counter_param,\n CC,\n detector\n) -> None:\n program = programs[counter_param()]\n counter_param((counter_param() + 1) % len(programs))\n CC.eqasm_program(program.filename)\n\n detector.nr_shots = len(program.sw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renders the allbuilds template to see all builds | def all_builds():
return render_template("allbuilds.html", builds=mongo.db.build.find()) | [
"def show_pubbuilds(request):\n builds = BuildsTable.objects.filter(access_r='pub').order_by('-time')\n return render(request, 'build_public.html', {'builds': builds})",
"def show_mybuilds(request):\n builds = BuildsTable.objects.filter(user=request.user).order_by('time')\n return render(request, 'bui... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the build's Id to populate the forms with that build's values for editing, and finds all collection data to populate the forms with options | def edit_build(build_id):
build = mongo.db.build.find_one({"_id": ObjectId(build_id)})
motherboards = mongo.db.motherboard.find()
processors = mongo.db.processor.find()
processor_coolers = mongo.db.processorcooler.find()
memory = mongo.db.memory.find()
graphics_cards = mongo.db.graphicscard.fin... | [
"def _build_forms_from_get(self):\n \n if self.config_id is None:\n # New form\n \n initial_values = []\n if 'data_file' in self.request.GET:\n initial_values = [{'data_runs': self.request.GET.get('data_file', '')}]\n ScanFormSe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the build Id and sends updated data to Mongo, then redirects to the all builds page | def update_build(build_id):
build = mongo.db.build
build_params = {
'build_name': request.form.get('build_name'),
'motherboard': request.form.get('motherboard'),
'processor': request.form.get('processor'),
'processor_cooler': request.form.get('processor_cooler'),
'memory'... | [
"def edit_build(build_id):\n\n build = mongo.db.build.find_one({\"_id\": ObjectId(build_id)})\n motherboards = mongo.db.motherboard.find()\n processors = mongo.db.processor.find()\n processor_coolers = mongo.db.processorcooler.find()\n memory = mongo.db.memory.find()\n graphics_cards = mongo.db.gr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the build Id and deletes it from the database, then redirects to the all builds page | def delete_build(build_id):
mongo.db.build.delete_one({'_id': ObjectId(build_id)})
return redirect(url_for('all_builds')) | [
"def delete_build_view(request, node_id):\n node = get_object_or_404(Node, pk=node_id)\n build = get_object_or_404(Build, node=node)\n \n # Check that the user has delete permission for the actual model\n node_modeladmin = get_modeladmin(Node)\n if not node_modeladmin.has_change_permission(request... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and bind db_pool before starting to serve requests | async def create_db_pool() -> None:
create_redis_pool = functools.partial(aioredis.create_redis_pool, encoding="utf-8")
redis_uri = f"redis://{REDIS_HOST}:{REDIS_PORT}"
redis = await trio_asyncio.run_asyncio(create_redis_pool, redis_uri)
app.db_pool = Database(redis) | [
"def dbpool(db_params):\n\n db_pool = psycopg2.pool.SimpleConnectionPool(1, 8, \n user=db_params[\"user\"],\n database=db_params[\"database\"])\n \n if(db_pool):\n # print(\"Connection pool created successfully\")\n\n return db_pool",
"async def acquire_pool(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set image03 attributes that can be extracted from the (format-independent) volume | def _extract_attributes_from_volume(self):
vol = nibabel.load(self.nifti_1)
try:
(xyz_units, t_units) = vol.get_header().xyzt_units()
except:
(xyz_units, t_units) = (None, None)
if xyz_units == 'mm':
xyz_units = 'Millimeters'
elif xyz_units == ... | [
"def incorrect_setting_for_volume_in_cm3_1():\n\n test_material = nmm.Material.from_library(\n name=\"Li4SiO4\",\n enrichment=50.0,\n enrichment_target=\"Li6\",\n enrichment_type=\"ao\",\n volume_in_cm3=\"1.0\",\n )\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
report whether the file or S3 object exists | def exists(self):
(bucket_name, object_name) = self.source[5:].split('/', 1)
s3 = S3Connection(self._s3_access_key,
self._s3_secret_key,
calling_format=OrdinaryCallingFormat())
try:
bucket = s3.get_bucket(bucket_name)
exce... | [
"def file_exists( s3_path ):\n\n return _get_key(s3_path).exists()",
"def s3_file_exists(file_path, bucket_name):\n try:\n boto3.client('s3').head_object(Bucket=bucket_name, Key=file_path)\n except ClientError:\n # Not found\n return False\n return True",
"def test_asset_saintsx... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the ceiling to dp decimal places (to pay back borrowed amounts). Includes the 0.1% trading fee | def binance_ceil(x:float, dp:float):
return math.ceil(x*1.001*(10 ** dp))/(10 ** dp) | [
"def roundAmount(buyPrice, balance, stepSize):\n\tbuyPrice = float(buyPrice)\n\tbalance = float(balance)\n\tstepSize = float(stepSize)\n\tamount = (balance / buyPrice) - (balance / buyPrice % stepSize)\n\tamount = format(amount, '.8f')\n\treturn amount",
"def get_fee(market, price):\r\n return round(market.api... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns dict for isolated margin account for base_asset. Enter base_asset as 'FET'. Do NOT include USDT | def get_isolated_margin_account(client, asset: str):
c = client.get_isolated_margin_account()
return list(filter(lambda x: x["baseAsset"]["asset"] == asset, c["assets"]))[0] | [
"def get_asset_balance(self):\n return self.client.get_asset_balance(asset)",
"async def get_margin_account(self, **params):\r\n return await self.client_helper(\"get_margin_account\", **params)",
"def account_map():\n return wallet['obj'].account_map",
"def _asset_info(self, node, date):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the price as a float. pair MUST include USDT, e.g. ZECUSDT | def get_price(client, pair:str):
return float(client.get_recent_trades(symbol=pair, limit=1)[0]["price"]) | [
"def decimalize_price(t):\n return \"{0:.2f}\".format(float(t[0]))",
"def _price_str_to_float(price_str: str) -> float:\n return float((price_str[4:]).replace(',', '.'))",
"def get_btcprice():\n bitcoin_api_url = \"https://api.alternative.me/v2/ticker/bitcoin/?convert=CAD\"\n response = requests... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the USDT spot balance as a float | def get_usdt_balance(client):
return float(client.get_asset_balance(asset='USDT')["free"]) | [
"def balance(self):\n return Amount(self._balance, \"usd\")",
"def getUninvested(self) -> float:\n record = self.conn.execute(\"\"\"SELECT amount FROM uninvested\"\"\").fetchone()\n if record:\n return float(record[0])\n else:\n return 0",
"def get_balan... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes frame image to file. | def writeFrame(self, frameNum, img):
# use me pattern in for dest
frameFilename = self.dest % frameNum
print "write " + frameFilename
# write file
if not(img):
img = Image.new("RGB", self.size, "White")
img.save(frameFilename, 'PNG') | [
"def __writeFrame(self, saveDir=\"./ballData\"):\r\n if not os.path.exists(saveDir):\r\n os.makedirs(saveDir)\r\n saveName = str(int(time.time()))\r\n saveImgPath = os.path.join(saveDir, saveName + \".jpg\")\r\n try:\r\n cv2.imwrite(saveImgPath, self.frameArray)\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of fault domains that the Dedicated Host Group spans. Changing this forces a new resource to be created. | def platform_fault_domain_count(self) -> pulumi.Input[int]:
return pulumi.get(self, "platform_fault_domain_count") | [
"def domains_count(self):\n return self._domains_count",
"def get_number_of_agents_for_scheduling(self, context):\n return 1",
"def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")",
"def domain_size(domain):\n fixed_domain_sizes = {\n \"current collector... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Manage a Dedicated Host Group. Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_dedicated_host_group = azure.compute.DedicatedHostGroup("exampleDedicatedHostGroup", resource_group_name=example_res... | def __init__(__self__,
resource_name: str,
args: DedicatedHostGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
... | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n automatic_placement_enabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing DedicatedHostGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automatic_placement_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
... | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n active_nics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allow_forged_transmits: Optional[pulumi.Input[bool]] = None,\n allow_mac_changes:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of fault domains that the Dedicated Host Group spans. Changing this forces a new resource to be created. | def platform_fault_domain_count(self) -> pulumi.Output[int]:
return pulumi.get(self, "platform_fault_domain_count") | [
"def domains_count(self):\n return self._domains_count",
"def get_number_of_agents_for_scheduling(self, context):\n return 1",
"def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")",
"def domain_size(domain):\n fixed_domain_sizes = {\n \"current collector... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the Multi Agent State. It receives the solver settings and the list of all single agent states. | def __init__(self, single_agents_states, solver_settings, parent=None):
super().__init__(single_agents_states, solver_settings, parent=parent)
self._back_propagation_set = []
self._collisions_set = set()
self.compute_cost()
self.compute_heuristics() | [
"def _initialize_agents(self):\n\n for agent in self.agents:\n agent.fill_with_binary()\n\n self.best_agent = copy.deepcopy(self.agents[0])",
"def initial_agent_states(self) -> Dict[str, AgentState]:\n if self._initial_agent_states is None:\n raise AEAEnforceError(\"Call... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Expand the current state. For each single state, if the corresponding agent is not in the collision set, the next single state will be the one obtained by following the optimal policy, otherwise if it is in the collision set all the possible moves will be considered for that agent. Then these states are iterated in ord... | def expand(self, verbose=False):
if verbose:
print("Expansion in progress... COLLISIONS SET {:<24}".format(str(self._collisions_set)), end=" ")
candidate_list = []
for i, single_state in enumerate(self._single_agents_states):
if i in self._collisions_set:
... | [
"def expand(state: State) -> Generator[State, None, None]:\n n = len(state)\n for i in range(n):\n for j in range(n):\n if state[i, j] in Problem.values:\n continue\n else:\n for v in Problem.values:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the back propagation set with the back_set inserted. | def set_back_propagation_set(self, back_set):
self._back_propagation_set = back_set | [
"def get_back_propagation_set(self):\n return self._back_propagation_set",
"def backstep(self):\n\n self.input.setDelta(self.output.getNetDelta())\n self.output.value = self.history.pop()",
"def restore(self):\n\n # Restore the sets\n try:\n self.mr.master_atoms_mapped.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the back propagation set of this state. | def get_back_propagation_set(self):
return self._back_propagation_set | [
"def set_back_propagation_set(self, back_set):\n self._back_propagation_set = back_set",
"def prev_state_combiner(self):\n if hasattr(self, \"_prev_state_combiner\"):\n return self._prev_state_combiner\n else:\n return list(set(self.combiner) - set(self.current_combiner)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the collisions set with the collisions_set inserted. | def set_collisions_set(self, collisions_set):
self._collisions_set = collisions_set | [
"def collisions(self, collisions):\n\n self._collisions = collisions",
"def __set_collision(self, collision):\n if self.collisions:\n self.collisions[0] = collision\n else:\n self.collisions.append(collision)",
"def sets(self, sets):\n\n self._sets = sets",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the collision set of this state. | def get_collisions_set(self):
return self._collisions_set | [
"def __get_collision(self):\n if self.collisions:\n return self.collisions[0]",
"def get_collisions(self):\r\n\r\n all_collisions = pygame.sprite.Group()\r\n all_collisions.add(pygame.sprite.spritecollide(self, self.walls, False),\r\n pygame.sprite.spritec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the multi agent state and the given multi agent state have the same positions for all the single agent states. | def equal_position(self, other):
assert isinstance(other, MStarState)
for i, single_state in enumerate(self._single_agents_states):
if not single_state.equal_position(other.get_single_agent_states()[i]):
return False
return True | [
"def equal_position_and_time_step(self, other):\n assert isinstance(other, MStarState)\n for i, single_state in enumerate(self._single_agents_states):\n if not single_state.equal(other.get_single_agent_states()[i]):\n return False\n return True",
"def check_visited_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the multi agent state and the given multi agent state have the same positions for all the single agent states. | def equal_position_and_time_step(self, other):
assert isinstance(other, MStarState)
for i, single_state in enumerate(self._single_agents_states):
if not single_state.equal(other.get_single_agent_states()[i]):
return False
return True | [
"def equal_position(self, other):\n assert isinstance(other, MStarState)\n for i, single_state in enumerate(self._single_agents_states):\n if not single_state.equal_position(other.get_single_agent_states()[i]):\n return False\n return True",
"def check_visited_positi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an aggregator given its identifier | def get(self, aggregator_id):
aggregator = get_a_aggregator(aggregator_id)
if not aggregator:
return {'success': False, 'msg': 'aggregator does not exist'}
else:
return aggregator | [
"def get_aggregator(cls, aggregator_name):\n try:\n aggregator_class = cls._class_registry[aggregator_name.lower()]\n except KeyError:\n raise KeyError(\"No such chart type: {0:s}\".format(aggregator_name.lower()))\n return aggregator_class",
"def get_aggregator(gt_id):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete aggregator by id | def delete(self, aggregator_id):
le_aggregator = get_a_aggregator(aggregator_id)
if not le_aggregator:
return {'success': False, 'msg': 'aggregator does not exist'}
else:
db.session.delete(le_aggregator)
db.session.commit()
return {'success': True,... | [
"def deleteGroup(id):",
"def delete(request, agg_id):\n next = request.GET.get(\"next\", None) or reverse(\"home\")\n aggregate = get_object_or_404(Aggregate, id=agg_id).as_leaf_class()\n # Stop all slices using the aggregate\n if request.method == \"POST\":\n for s in aggregate.slice_set.all()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ANOVA table for one fitted linear model. | def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, ... | [
"def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,\n pr_test, robust):\n #maybe we should rethink using pinv > qr in OLS/linear models?\n effects = getattr(model, 'effects', None)\n if effects is None:\n q,r = np.linalg.qr(exog)\n effects = n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ANOVA table for one or more fitted linear models. | def anova_lm(*args, **kwargs):
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only s... | [
"def anova_lm(*args, **kwargs):\n typ = kwargs.get('typ', 1)\n\n ### Farm Out Single model Anova Type I, II, III, and IV ###\n\n if len(args) == 1:\n model = args[0]\n return anova_single(model, **kwargs)\n\n if typ not in [1, \"I\"]:\n raise ValueError(\"Multiple models only suppor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the reward_text of this DestinyActivityRewardDefinition. | def reward_text(self, reward_text):
self._reward_text = reward_text | [
"def setExperienceReward(self, flag):\n self.handle.rewardExp = flag",
"def add_reward(self, state: Tuple[int, int], reward: float):\n assert len(state) == 2, \"state shape must be 2D\"\n self.rewards[state[0], state[1]] = reward",
"def update_reward(self, reward, force=False):\n if forc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the reward_items of this DestinyActivityRewardDefinition. | def reward_items(self, reward_items):
self._reward_items = reward_items | [
"def refund_items(self, refund_items):\n\n self._refund_items = refund_items",
"def _reset_rewards(self):\n self.rewards = [0, 0, 0, 0]\n self.nstep = self.game.active_player",
"def itemizations(self, itemizations):\n\n self._itemizations = itemizations",
"def blocked_items(self, b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the value of the raster at a particular point | def read_value(self, point):
xOffset = int((point.x - self.xOrigin) / self.pixelWidth)
yOffset = int((point.y - self.yOrigin) / self.pixelHeight)
data = self.band.ReadAsArray(xOffset, yOffset, 1, 1)
return Distance(m=data[0,0]) | [
"def raster_values_at_points(xy, raster_file, band=1, nodata_rel_tol=1.0e-08):\n\n # Raster info\n raster = gdal.Open(raster_file)\n raster_band = raster.GetRasterBand(band)\n raster_band_type = gdal.GetDataTypeName(raster_band.DataType)\n\n # Projection info\n transform = raster.GetGeoTransform()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a tree, a set of rules, and an initial state, return the list of all the trees that are produced by the transduction. | def transduce(tree, rules, initial):
# list of the current generation of SearchStates
current = []
complete = []
# give the root the initial state.
statemap = {():'q'}
current.append(SearchState(tree, statemap, 1.0))
progress = True
while progress:
nextgen = []
# for e... | [
"def filter_subtree(head, rules):\n for rule, replacement in rules:\n if rule(head):\n if callable(replacement):\n replacement = replacement(head)\n return [replacement]\n if len(list(head.children)) == 0:\n return [head]\n output = []\n for child in he... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add dropdown items to UI Tabs. | def uitab_dropdown_items(tab_name, tab, domain, request) -> List[dict]: | [
"def generate_item_dropdown(self, e):\n self.items_df = self.df.query(\"types == @self.food_type_dropdown.get()\")\n self.food_names_list = list(self.items_df[\"title\"])\n self.food_names_dropdown.config(value=self.food_names_list)",
"def addDropDown(self, *args) -> \"adsk::core::Ptr< adsk::... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add sidebar items to UI tabs. | def uitab_sidebar_items(tab_name, tab, domain, request) -> List[Tuple[str, List[dict]]]: | [
"def setup_sidebar_items(self):\n\t\tif self.data.allow_sidebar_items:\n\t\t\t# disable all\n\t\t\tfrappe.db.sql(\"update `tabPortal Menu Item` set enabled=0\")\n\n\t\t\t# enable\n\t\t\tfrappe.db.sql(\n\t\t\t\t\"\"\"update `tabPortal Menu Item` set enabled=1\n\t\t\t\twhere route in ({})\"\"\".format(\n\t\t\t\t\t\",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the closest Station for the zone of the chosen point for one year | def getClosestSationByYearSingleBlock(lon,lat, year):
lon_t = int(lon)
lat_t = int(lat)
rows = session.execute(f"""SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_t} AND lat_t={lat_t} AND year={year}""")
for row in rows:
row0 = None
row1 = row
row2 = row
point0 = (0,0... | [
"def getClosestSationByYearMultiBlock(lon,lat, year):\n lon_t = int(lon)\n lat_t = int(lat)\n row0 = None\n for lon_m in (lon_t - 1,lon_t, lon_t + 1):\n for lat_m in (lat_t - 1,lat_t, lat_t + 1):\n rows = session.execute(f\"\"\"SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_m} AND ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the closest Station for the zone of the chosen point for one year | def getClosestSationByYearMultiBlock(lon,lat, year):
lon_t = int(lon)
lat_t = int(lat)
row0 = None
for lon_m in (lon_t - 1,lon_t, lon_t + 1):
for lat_m in (lat_t - 1,lat_t, lat_t + 1):
rows = session.execute(f"""SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_m} AND lat_t={lat_m} ... | [
"def getClosestSationByYearSingleBlock(lon,lat, year):\n lon_t = int(lon)\n lat_t = int(lat)\n rows = session.execute(f\"\"\"SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_t} AND lat_t={lat_t} AND year={year}\"\"\")\n for row in rows: \n row0 = None\n row1 = row\n row2 = row\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if a row exists for the specified date | def _row_exists(self, session, fordate):
return (session.query(EconomicIndicator).filter_by(Date=fordate).count() > 0) | [
"def __contains__(self, date):\n return self._first_day <= date <= self._last_day",
"def has_date(self, line):\n return self.verify_match(line, self.date)",
"def exists(self, initdate, enddate):\n return self.queue.exists(initdate, enddate)",
"def check_generated_data(date) -> bool:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the end address of this selection. Address | def getEndAddress(self) -> ghidra.program.model.address.Address:
... | [
"def end_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"end_address\")",
"def end_ip_address(self) -> Optional[str]:\n return pulumi.get(self, \"end_ip_address\")",
"def getEndingAddress(self):\n return HopperLowLevel.getBasicBlockEndingAddress(self.__procedure__.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the start location. ProgramLocation | def getFrom(self) -> ghidra.program.util.ProgramLocation:
... | [
"def getTo(self) -> ghidra.program.util.ProgramLocation:\n ...",
"def location(self) -> str:\n if self.__expanded_launch_file_path is None:\n # get_launch_description() has not been called yet\n return ' + '.join([str(sub) for sub in self.__launch_file_path])\n return se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the start address of this selection. Address | def getStartAddress(self) -> ghidra.program.model.address.Address:
... | [
"def getStartingAddress(self):\n return HopperLowLevel.getSegmentStartingAddress(self.__internal_segment_addr__)",
"def getStartingAddress(self):\n return HopperLowLevel.getSectionStartingAddress(self.__internal_section_addr__)",
"def start(self) -> SourceLocation:\n return self._start",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the end location. ProgramLocation | def getTo(self) -> ghidra.program.util.ProgramLocation:
... | [
"def getFrom(self) -> ghidra.program.util.ProgramLocation:\n ...",
"def getEndLocation(self):\n ends = [\"End of the Project Gutenberg EBook\",\n \"End of Project Gutenberg's\",\n \"\\*\\*\\*END OF THE PROJECT GUTENBERG EBOOK\",\n \"\\*\\*\\* END OF THIS ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all the claimable balances an account has to claim. | def getClaimableBalances(public_key: str) -> list:
balances = server.claimable_balances().for_claimant(public_key).call()['_embedded']['records']
return [ {"sponsor": elem.get("sponsor"), "id": elem.get("id"), "asset": elem.get("asset").replace('native', 'XLM').split(':')[0], "amount": round(int(float(elem.get(... | [
"def balances():\n return _make_request('balances', private=True)['balances']",
"def get_balance(self):\n returnList = []\n for account in self.accounts:\n balance = self.f.get_balance(account).amount.amount + 42\n returnList.append(BalanceItem(account.iban, balance ))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if an asset like that already exists in the balances of the account, in order to establish a trustline | def checkTrustline(asset :str, issuer:str, available_assets: list) -> bool:
for elem in available_assets:
if elem["sponsor"] == asset:
return True
return False | [
"def check_assets(self):\n try:\n active_assets = r.get(f\"active_{self.curr}_{self.user_id}\")\n if not active_assets:\n r.set(f\"active_{self.curr}_{self.user_id}\", 0)\n return False\n active_assets = Decimal(active_assets.decode())\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate an XDR to Claim a Balance using Albedo or web+stellar | def XDRForClaimableBalance(public_key: str, balance_id: str, asset=None, asset_issuer=None):
base_fee = server.fetch_base_fee()
if(getAssets(public_key)[0] == 0):
# 3. User does not have enough XLM to pay fees
account = server.load_account(public_key)
transaction = TransactionBuilder(
... | [
"def generate_reward_tx(rewardee, base_fee = None):\n try:\n source_acc = server.load_account(PUBLIC_KEY) # fetch sequence or will it be used with a fee bump?\n except:\n print(f\"Failed to load public reward account {PUBLIC_KEY}!\")\n return None\n\n fee = BASE_FEE\n\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
read regions from a file. | def readFromFile(self, infile, ignore_strand=False):
self.mForwardRegions = {}
self.mReverseRegions = {}
self.mRegions = []
self.mIgnoreStrand = ignore_strand
n = 0
for line in infile:
if line[0] == "#":
continue
token, sbjct_toke... | [
"def read_regions_data(prefix):\n\n ret = []\n idx = {}\n columns = []\n for line in open(prefix+'_regions.txt'):\n line = line.strip()\n if line == '':\n continue\n\n if line.startswith('#'):\n line = line[1:]\n header = line.split()\n fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates angular velocities and linear accelerations via IMU measurement | def callback_imu(msg):
global omega, a, imu_callback_done
if not imu_callback_done:
imu_callback_done = True
omega = [-msg.angular_velocity.x, -msg.angular_velocity.y, -msg.angular_velocity.z]
a = [msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z] | [
"def update_imu(self, msg):\n\t\tself.sen.imu.acc_body = enu_to_ned(np.array([[msg.linear_acceleration.x], [msg.linear_acceleration.y], [msg.linear_acceleration.z]]))\n\t\tself.sen.imu.ang_vel = enu_to_ned(np.array([[msg.angular_velocity.x], [msg.angular_velocity.y], [msg.angular_velocity.z]]))",
"def update( sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Publish the estimated position and orientation, and prepare for the next cycle | def finish_loop(self):
pub = rospy.Publisher('/ekf_pose', PoseStamped, queue_size=10)
msg = PoseStamped()
msg.header.frame_id = '/map'
msg.header.stamp = rospy.Time().now()
msg.pose.position = Point(self.X_est[0, 0], self.X_est[1, 0], self.X_est[2, 0])
x, y, z, w... | [
"def print_position(self) -> None:\n self.hkl_now = list(self.calculate_hkl_from_angles())\n self.pseudo_dict_to_update = self.get_pseudo_angles_from_motor_angles()\n print(\"\")\n print(\n \"HKL now = \",\n format_5_decimals(self.hkl_now[0]),\n format_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Error-handling wrapper around pythonaddins.GPToolDialog. | def toolDialog(toolbox, tool):
result = None
try:
result = pythonaddins.GPToolDialog(toolbox, tool)
# FIXME: this is a hack to prevent:
# TypeError: GPToolDialog() takes at most 1 argument (2 given)
# print ''
except TypeError:
pass
# don't return anything. this p... | [
"def popupBadEventError(self):\n keys_origin = (\"Time\", \"Latitude\", \"Longitude\", \"Depth\",\n \"used P Count\", \"used S Count\")\n keys_magnitude = (\"Magnitude\",)\n missing = [key for key in keys_origin if key not in self.dictOrigin]\n missing += [key for k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the Neural Representational Similarity (NPS) for fMRI data for ROI | def nps_fmri_roi(fmri_data, mask_data):
if len(np.shape(fmri_data)) != 5 or np.shape(fmri_data)[0] != 2:
print("\nThe shape of fmri data should be [2, n_subs, nx, ny, nz].\n")
return "Invalid input!"
if len(np.shape(mask_data)) != 3:
print("\nThe shape of fmri data should be [nx, ny... | [
"def psnr(images, finalpred):\n pixel_max = 255.0\n mse = np.mean((images-finalpred)**2)\n p = 20 * math.log10( pixel_max / math.sqrt( mse ))\n return p",
"def compute_PSNR(out, lbl):\n out = out[0, :, :, 0]\n lbl = lbl[0, :, :, 0]\n diff = out - lbl\n rmse = np.sqrt(np.mean(diff**2))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Produce random rgb colors for graphs. | def random_colors():
def r():
return random.randint(0, 255)
return 'rgb({},{},{})'.format(r(), r(), r()) | [
"def random_color_gen():\n r = lambda: random.randint(0, 255)\n return 'ff%02X%02X%02X' % (r(), r(), r())",
"def randColor():\n h = 0.3\n v = 0.85\n s = 0.9\n \n for c in startColors:\n yield c\n\n while True:\n \n def toHex(x):\n return hex(int(x*255))[2:]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of tuples with counted offenses. | def offense_counter(offense_list):
sum_offense = Counter()
for offense in offense_list:
if offense is None:
continue
sum_offense[offense] += 1
return sum_offense.most_common() | [
"def get_occurrences():",
"def list_itemcnt(a_list):\n return list(Counter(a_list).items())",
"def hits(clues):\n return sum([clue.getCount() for clue in clues])",
"def _offer_counter(self):\n self._offer_count += 1\n return (self.name, self._offer_count)",
"def calorie_list(list_of_even... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
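A quick usage example of the counter above: None entries are skipped and the result comes back sorted by frequency.

>>> offense_counter(["THEFT", "ASSAULT", None, "THEFT"])
[('THEFT', 2), ('ASSAULT', 1)]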
Return a dictionary of crimes broken down by subcategories. | def crime_category_breakdown():
db_request = main_db_call()
all_crimes = [item[0] for item in db_request]
sub_offense = offense_counter(all_crimes)
sub_pie = color_applicator(sub_offense)
sub_dict = {}
for i, thing in enumerate(sub_pie):
for key, category in UPPER_DICT.items():
... | [
"def parse_coco_categories(categories):\n cat_map = {c[\"id\"]: c for c in categories}\n\n classes = []\n supercategory_map = {}\n for cat_id in range(max(cat_map) + 1):\n category = cat_map.get(cat_id, None)\n try:\n name = category[\"name\"]\n except:\n name ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Table ajax for dataTables | def tableajax(request, plugin_name, data, group_type="all", group_id=None):
# Pull our variables out of the GET request
get_data = request.GET["args"]
get_data = json.loads(get_data)
draw = get_data.get("draw", 0)
start = int(get_data.get("start", 0))
length = int(get_data.get("length", 0))
... | [
"def updateTable(self):\r\n self.dataTable = Table(self.frame, dataframe = self.data)\r\n self.dataTable.show()",
"def clm_ajax_get_table_users(request):\n if request.method == 'GET':\n users = prep_data('admin_clm/user/get_list/', request.session)\n\n for item in users:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
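The handler above follows the DataTables server-side protocol: the client sends draw/start/length and expects draw, recordsTotal, recordsFiltered and data in the response. A framework-free sketch of that contract, with a placeholder row source standing in for the real query, is shown below.

import json

def datatables_page(get_data_json, all_rows):
    # all_rows: list of already-serialised row lists; a stand-in for the real query.
    args = json.loads(get_data_json)
    draw = int(args.get("draw", 0))
    start = int(args.get("start", 0))
    length = int(args.get("length", 10))
    page = all_rows[start:start + length] if length > 0 else all_rows[start:]
    return {
        "draw": draw,
        "recordsTotal": len(all_rows),
        "recordsFiltered": len(all_rows),
        "data": page,
    }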
Process a single management source's data This function first optionally calls any additional processors for the management source in question (Munki for example). Then it processes Facts. Then ManagedItems. | def process_management_submission(source, management_data, machine, object_queue):
# Add custom processor funcs to this dictionary.
# The key should be the same name used in the submission for ManagementSource.
# The func's signature must be
# f(management_data: dict, machine: Machine, object_queue: dic... | [
"def manage_data_source(self, request):\n\n layout = ManageDataSourceItemsLayout(self, request)\n return morepath.redirect(layout.manage_model_link)",
"def _process_finalize(self, contexts):\n added = []\n updated = []\n name_updated = []\n\n for ctx in contexts:\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function reads the image and processes the image to extract the feature_vector of the image | def custom_feature_transformation(image_file_path):
image = cv2.imread(image_file_path)
thresholded_image = highlight_invariant_threshold(image)
filled_image = hole_fill(thresholded_image)
region_grown_image = grow_region(filled_image)
image_array = image_to_array(region_grown_image)
feature_vec... | [
"def calculate_feature_vector(path):\n\ttf_image = preprocess_image(path)\n\treturn module(tf_image)",
"def feature_extraction(self, sample):\n image, filename = sample\n\n if self.feature_model.training:\n print(\"Run feature model in inference mode!\")\n exit(0)\n\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
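The helpers in that row (highlight_invariant_threshold, hole_fill, grow_region) are project-specific and not shown. A rough stand-in pipeline using only standard OpenCV calls, not the original method, could look like this.

import cv2
import numpy as np

def simple_feature_vector(image_file_path):
    image = cv2.imread(image_file_path)                       # BGR image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Otsu thresholding as a crude stand-in for the custom, illumination-invariant step.
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    resized = cv2.resize(binary, (64, 64))                    # fixed size so vectors are comparable
    return (resized.flatten() / 255.0).astype(np.float32)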
[Scans directory for abnormalities in images] | def scan_image_abnormalities(base_path, base_img_resolution, base_msk_resolution):
# Defining lists for appending paths of abnormal images, and their heights, widths, and channels
ab_imgs, ab_im_h, ab_im_w, ab_im_c = [], [], [], []
ab_masks, ab_msk_h, ab_msk_w, ab_msk_c = [], [], [], []
train_files, m... | [
"def find_images(folder,img_type):\n pass",
"def scan_path(directory):\n objname= str(base64.b64encode(directory.encode('utf-8')))\n preprocess='preprocess'\n\n if not os.path.isdir(preprocess):\n os.mkdir(preprocess)\n if os.path.isfile(preprocess+'/'+objname):\n picklefile=open(prep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes all threads that return FALSE on isAlive() from the running_threads list | def free_dead(self):
for th in self.running_threads[:]:
if th[0].isAlive() == False:
self.running_threads.remove(th) | [
"def _monitor(self):\n if len(self.threads) == 1:\n self._monitor_one(self.threads[0])\n return\n\n all_alive = True\n while all_alive:\n if not all([t.isAlive() for t in self.threads]):\n for thread in self.threads:\n thread.st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
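The same clean-up expressed with the non-deprecated Thread.is_alive() (isAlive() was removed in Python 3.9); iterating over a copy of the list, as the row above does, is what makes the in-place removal safe.

import threading

def free_dead(running_threads):
    # running_threads: list of (Thread, name, params) tuples, as in the row above.
    for entry in running_threads[:]:          # iterate over a copy so removal is safe
        if not entry[0].is_alive():
            running_threads.remove(entry)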
Joins all the threads together into the calling thread. | def joinall(self):
for th in self.running_threads[:]:
while th[0].isAlive():
sleep(0.1)
th[0].join()
# print "Thread:",th[1],"joined","isalive:",th[0].isAlive() --- Debug stuff | [
"def join(self):\n for thread in self.threads:\n while 1:\n thread.join(1)\n if not thread.isAlive():\n break",
"def join(self):\n logger.debug(\"Joining Threads: '%s'\", self._name)\n for thread in self._threads:\n logger... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns parameters from the running_threads list for external manipulation | def get_all_params(self):
for thli in self.running_threads:
yield(thli[0],thli[1],thli[2]) | [
"def thread_info(self):\n print(threading.active_count())\n for t in threading.enumerate():\n print(t.getName())",
"def hyperthreads_for(rank_spec):",
"def getAllThreads(self):\n raise NotImplementedError",
"def getThreads():\n if sys.platform == 'win32':\n return (in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the given element_names from xmlfile and yield compound objects for their xml subtrees (no extra objects are returned if element_names appear in the subtree) The compound objects provide all element attributes of the root of the subtree as attributes unless attr_names are supplied. In this case attr_names maps e... | def parse(xmlfile, element_names, element_attrs={}, attr_conversions={},
heterogeneous=False, warn=False):
if isinstance(element_names, str):
element_names = [element_names]
elementTypes = {}
for _, parsenode in ET.iterparse(_open(xmlfile, None)):
if parsenode.tag in element_names:... | [
"def parse_fast_nested(xmlfile, element_name, attrnames, element_name2, attrnames2,\n warn=False, optional=False, encoding=\"utf8\"):\n Record, reprog = _createRecordAndPattern(element_name, attrnames, warn, optional)\n Record2, reprog2 = _createRecordAndPattern(element_name2, attrnames2,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
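A self-contained sketch of the iterparse pattern the row relies on: only elements whose tag is of interest are inspected, and each is cleared once handled so memory stays flat on large files. The element and attribute names below are made up for the example.

import io
import xml.etree.ElementTree as ET

xml_bytes = b"""<routes>
    <vehicle id="v0" depart="0.0"/>
    <vehicle id="v1" depart="3.5"/>
</routes>"""

for _, node in ET.iterparse(io.BytesIO(xml_bytes)):
    if node.tag == "vehicle":
        print(node.get("id"), float(node.get("depart")))
        node.clear()  # free the subtree once it has been consumed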
Parses the given attrnames from all elements with element_name And attrnames2 from element_name2 where element_name2 is a child element of element_name | def parse_fast_nested(xmlfile, element_name, attrnames, element_name2, attrnames2,
warn=False, optional=False, encoding="utf8"):
Record, reprog = _createRecordAndPattern(element_name, attrnames, warn, optional)
Record2, reprog2 = _createRecordAndPattern(element_name2, attrnames2, warn, opt... | [
"def attrib_parser(element, fields):\r\n attr_dict = {}\r\n\r\n # Fill attr_dict from element attributes but only attributes designated by field\r\n for attr in element.attrib: # takes elements specified in field\r\n if attr in fields:\r\n attr_dict[attr] = element... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper method that reads the model metadata file for the model selected. | def read_model_metadata_file(model_metatdata_file):
try:
err_msg = ""
if(not os.path.isfile(model_metatdata_file)):
err_msg = "No model_metadata_file for the model selected"
return 1, err_msg, {}
with open(model_metatdata_file) as json_file:
data = json.lo... | [
"def readModel(self, path) -> None:\n ...",
"def load_model_metadata(self):\n info_dict = joblib.load(self.path_model_metadata)\n\n self.max_sequence_size = info_dict['max_sequence_size']\n self.number_of_distinct_items = info_dict['number_of_distinct_items']\n self.item_diction... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
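A compact version of the same read-and-validate step using only the standard library; the (status, message, payload) return convention mirrors the row above, while the exact error wording is an assumption.

import json
import os

def read_model_metadata(path):
    if not os.path.isfile(path):
        return 1, "No model_metadata_file for the model selected", {}
    try:
        with open(path) as fh:
            return 0, "", json.load(fh)
    except (json.JSONDecodeError, OSError) as exc:
        return 1, "Could not parse model metadata: {}".format(exc), {}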
Helper method that returns the corresponding enum values for sensors in the model_metadata json of the model selected. | def get_sensors(model_metatdata_json):
try:
sensors = None
err_msg = ""
if constants.ModelMetadataKeys.SENSOR in model_metatdata_json:
sensor_names = set(model_metatdata_json[constants.ModelMetadataKeys.SENSOR])
if all([constants.SensorInputKeys.has_member(sensor_name... | [
"def get_sensor_labels(self) -> Dict[str, List[str]]:\n return {}",
"def _get_fsm_sensor(self):\n fsm, state = self._get_fsm_state()\n sensor = state.sensors[fsm.selected_sensor]\n return fsm, sensor",
"def test_enum_value(self):\n \n type = simdat.SimulationDataType.RE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
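The has_member check used in that row is not a built-in Enum method; a minimal way to provide it is sketched below, with hypothetical sensor names in place of the project's real constants.

from enum import Enum

class SensorInputKeys(Enum):
    # hypothetical members for illustration only
    observation = 1
    LIDAR = 2
    STEREO_CAMERAS = 3

    @classmethod
    def has_member(cls, name):
        return name in cls.__members__

print(SensorInputKeys.has_member("LIDAR"))   # True
print(SensorInputKeys.has_member("SONAR"))   # False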