| query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list of 19–20 strings) | metadata (dict) |
|---|---|---|---|
Calculate the price with a discount and return the sum | def calculate_total_price(prices, discount):
sum_prices = 0
for price in prices:
dis = discount/100
pricedis = price - price * dis
print(pricedis)
sum_prices = sum_prices + pricedis
    print(sum_prices)
return math.floor(sum_prices) | [
"def apply_discount(price, discount):\n return (money_to_float(price)\n .fold(lambda cost:\n (percent_to_float(discount)\n .fold(lambda savings: cost * (1 - savings)))))",
"def get_final_price(price, discount_percentage=10):\n return price-( price* discount_percenta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
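A quick worked example of the discount helper above (assuming the `print(sum_prices)` fix and an `import math` at module top; the prices are hypothetical):

```python
prices = [19.99, 5.50, 3.00]
total = calculate_total_price(prices, 15)  # 15% off each item
# 16.9915 + 4.675 + 2.55 = 24.2165 -> math.floor -> 24
print(total)  # 24
```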
Takes a list of dictionaries with "width", "height", and "method" keys and creates a map from image media types to the thumbnail size, thumbnailing method, and thumbnail media type to precalculate | def parse_thumbnail_requirements(
thumbnail_sizes: List[JsonDict],
) -> Dict[str, Tuple[ThumbnailRequirement, ...]]:
requirements: Dict[str, List[ThumbnailRequirement]] = {}
for size in thumbnail_sizes:
width = size["width"]
height = size["height"]
method = size["method"]
fo... | [
"def create_qrcode_thumbnails(original_image):\n thumbnail_kwargs = {\"format\": \"PNG\"}\n sizes = {\"small\": \"64x64\", \"medium\": \"256x256\", \"large\": \"490x490\"}\n images = {}\n for size, resolution in sizes.items():\n if size == \"square\":\n thumbnail = get_thumbnail(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
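For reference, a hypothetical input of the shape the parser above expects; the keys follow the row's description, while the specific sizes and method names are made up:

```python
thumbnail_sizes = [
    {"width": 32, "height": 32, "method": "crop"},
    {"width": 320, "height": 240, "method": "scale"},
]
requirements = parse_thumbnail_requirements(thumbnail_sizes)
```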
Test that trial specific restrictions apply | def test_trial_only_restriction(self):
self.given({
"rules:trial": {
"123": "restrict",
}
})
self.expect((
("1234", "restrict"),
("1235", "restrict"),
("1335", None),
)) | [
"def visitTrial(self, testSuite):",
"def test_all_same():\n assert meets_criteria(111111) == True, \"Should be True\"",
"def test_conformance(self):\n self._request_valid(\"conformance\")",
"def test_getinvestmentrequirements(self):\n pass",
"def test_pos_2():\n assert meets_criteria(111... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Performs pairwise matching of nans between ``x`` and ``y``. | def match_nans(x, y):
if has_missing(x) or has_missing(y):
# Need to copy to avoid mutating original objects and to avoid writeable errors
# with ``xr.apply_ufunc`` with vectorize turned on.
x, y = x.copy(), y.copy()
idx = np.logical_or(np.isnan(x), np.isnan(y))
# NaNs cannot... | [
"def nmi(X, Y):\n # remove pairs with a missing value in comparison\n\n new_X , new_Y = remove_pairs_with_a_missing(X, Y, missing_char=0)\n return normalized_mutual_info_score(new_X, new_Y), None #return NormalizedMutualInformation(pData1, pData2).get_distance() ",
"def distance_SNN(x, y):\n # Neighbo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Publishes a list of events. The events in the list 'events' are sent to the server in a new HTTP request. | def publish_events(self, events):
body = ztreamy.serialize_events(events)
logging.info("Connecting to " + self.hostname + " on port " + str(self.port))
conn = httplib.HTTPConnection(self.hostname, self.port)
conn.request('POST', self.path, body, ZtreamyClient._headers)
response =... | [
"def send_events_batch(self, data):\n return self._write_request(self._base_url, 'track/', data, batch=True)",
"def events(self, events: List[DeploymentEvent]):\n\n self._events = events",
"def add_events(self, events: List[Event]):\n self.events.extend(events)",
"def publisher():\n ba... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create `self.perf_patterns` and units only in `self.reference`. | def add_metrics(self):
for metric in self.METRICS:
#getlogger().info('creating metric %s', metric.label)
self.perf_patterns[metric.label] = reduce(self.stdout, self.num_tasks, metric.column, metric.function)
self.reference[metric.label] = (0, None, None, metric.unit)... | [
"def __init__(self, patterns):\n\n self.wildcard_patterns = {}\n literals = []\n \n for j, pattern in enumerate(patterns):\n pattern = pattern.strip()\n wildcards = pattern.count('*')\n \n if wildcards == 0:\n literals.append(pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test cycle task allow_change_state value by user position. | def test_change_state_by_user(self, user, expected_values):
all_models.Cycle.query.filter(
all_models.Cycle.id == self.cycle_id
).update({
all_models.Cycle.is_current: True,
})
db.session.commit()
user = all_models.Person.query.filter_by(email=user).one()
self.api.set_user(user)
... | [
"def test_change_state_by_is_current(self, cycle_is_current):\n all_models.Cycle.query.filter(\n all_models.Cycle.id == self.cycle_id\n ).update({\n all_models.Cycle.is_current: cycle_is_current,\n })\n db.session.commit()\n user_mail = self.WORKFLOW_OWNER\n user = all_models.Person.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test cycle task allow_change_state value by Cycle is_current value. | def test_change_state_by_is_current(self, cycle_is_current):
all_models.Cycle.query.filter(
all_models.Cycle.id == self.cycle_id
).update({
all_models.Cycle.is_current: cycle_is_current,
})
db.session.commit()
user_mail = self.WORKFLOW_OWNER
user = all_models.Person.query.filter_... | [
"def test_change_state_by_user(self, user, expected_values):\n all_models.Cycle.query.filter(\n all_models.Cycle.id == self.cycle_id\n ).update({\n all_models.Cycle.is_current: True,\n })\n db.session.commit()\n user = all_models.Person.query.filter_by(email=user).one()\n self.api.se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a TSV string from the sentences_input table to a list, optionally applying a fn to each element | def tsv_string_to_list(s, func=lambda x : x, sep='|^|'):
if s.strip() == "":
return []
# Auto-detect separator
if re.search(r'^\{|\}$', s):
split = re.split(r'\s*,\s*', re.sub(r'^\{\s*|\s*\}$', '', s))
else:
split = s.split(sep)
# split and apply function
return [func(x) for x in split] | [
"def tsv_string_to_listoflists(s, func=lambda x : x, sep1='|^|', sep2='|~|'):\n return tsv_string_to_list(s, func=lambda x : tsv_string_to_list(x, func=func, sep=sep1), sep=sep2)",
"def tab_split(line: str, converter: Callable[[str], Any]=str) -> List[Any]:\n return [converter(x) for x in line.split('\\t')]",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
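A few illustrative calls for the parser above (hypothetical inputs; note the function relies on `import re`):

```python
tsv_string_to_list("a|^|b|^|c")            # -> ['a', 'b', 'c']
tsv_string_to_list("{1, 2, 3}", func=int)  # auto-detected PG array -> [1, 2, 3]
tsv_string_to_list("   ")                  # -> []
```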
Convert a TSV string from sentences_input table to a list of lists | def tsv_string_to_listoflists(s, func=lambda x : x, sep1='|^|', sep2='|~|'):
return tsv_string_to_list(s, func=lambda x : tsv_string_to_list(x, func=func, sep=sep1), sep=sep2) | [
"def tsv_string_to_list(s, func=lambda x : x, sep='|^|'):\n \n if s.strip() == \"\":\n return []\n\n # Auto-detect separator\n if re.search(r'^\\{|\\}$', s):\n split = re.split(r'\\s*,\\s*', re.sub(r'^\\{\\s*|\\s*\\}$', '', s))\n else:\n split = s.split(sep)\n\n # split and apply function\n return [... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list of ddlib.Word objects from input row. | def create_ddlib_sentence(row):
sentence = []
for i, word in enumerate(row.words):
sentence.append(ddlib.Word(
begin_char_offset=None,
end_char_offset=None,
word=word,
lemma=row.lemmas[i],
pos=row.poses[i],
ner=row.ners[i],
dep_par=row.dep_parents[i],
... | [
"def generate_word_count(self, row):\n\tword_counter = Counter(row[1])\n\treturn [ (word, [ (row[0], word_counter[word]) ] ) \\\n for word in word_counter ]",
"def create_boggle_lst(row):\n\t# Read row to lst\n\trow_lst = row.split(' ')\n\trow_lst = case_insensitive(row_lst)\n\t# Create boggle list\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Escape a string that's meant to be in a Postgres array. We doublequote the string and escape backslashes and doublequotes. | def pg_array_escape(tok):
return '"%s"' % str(tok).replace('\\', '\\\\').replace('"', '\\\\"') | [
"def _quote_escape(item):\n\n rex_sqlquote = re.compile(\"'\", re.M)\n\n return rex_sqlquote.sub(\"''\", item)",
"def escape_sql_values(field_value):\n\treturn field_value.replace(\"'\",\"''\")",
"def escape( *args ):\n cmd = ''\n for s in args:\n if cmd: cmd += ' '\n if not s:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
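A minimal check of the escaping behavior above, with hypothetical inputs:

```python
print(pg_array_escape(r'a\b'))  # "a\\b"   - each backslash is doubled
print(pg_array_escape('a"b'))   # "a\\"b"  - quotes get a doubled backslash
print(pg_array_escape(123))     # "123"    - non-strings are stringified first
```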
Print a tuple as output of TSV extractor. | def print_tsv_output(out_record):
values = []
for x in out_record:
if isinstance(x, list) or isinstance(x, tuple):
cur_val = list_to_pg_array(x)
elif x is None:
cur_val = '\N'
else:
cur_val = x
values.append(cur_val)
print '\t'.join(str(x) for x in values) | [
"def print_aln(tuple):\n print tuple[0]\n print tuple[1]\n print \"Score:\", tuple[2]",
"def tsv_line(value_list):\n return '\\t'.join([str(x) for x in value_list])",
"def get_tsv(self):\n msg = ''\n for stmt in self.get_statements():\n if not stmt.evidence:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs through lines in sys.stdin, applying row_fn(row_parser(line)). Assumes that this outputs a list of rows, which get printed out in TSV format. Has standard error handling for malformed rows; optionally row_fn returns an object with pretty print. | def run_main_tsv(row_parser, row_fn):
for line in sys.stdin:
for line_out in row_fn(row_parser(line)):
print_tsv_output(line_out) | [
"def _read_format_line(line, format):\r\n rows = line.strip().split(\"\\t\")\r\n return _read_format_rows(rows, format)",
"def rows(self):\n def parse_result_row(row):\n return row.split(\"\\t\")\n\n for row in self.results.data:\n yield parse_result_row(row)",
"def process_rows(self, row_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
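A sketch of the parser/row-function pair the driver above expects (hypothetical helpers; `row_fn` must return a list of output rows):

```python
def parse_row(line):
    # Split one stdin line into its tab-separated fields.
    return line.rstrip('\n').split('\t')

def echo_with_count(row):
    # Emit one output row per input row, appending a field count.
    return [row + [len(row)]]

# run_main_tsv(parse_row, echo_with_count)  # reads sys.stdin, prints TSV
```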
Set the desired capacity for the group. | def set_capacity(self, capacity):
params = {'AutoScalingGroupName': self.name,
'DesiredCapacity': capacity}
req = self.connection.get_object('SetDesiredCapacity', params,
Request)
self.connection.last_request = req
return req | [
"def set_course_capacity(self, capacity: int) -> None:\n self.course_capacity = capacity",
"def resize(self, capacity: int) -> None:\n self.capacity = capacity",
"def set_nominal_capacity(self, capacity_ah=30):\n\n self.master_payloads['BattCap_Write'][4] = capacity_ah",
"def set_capacity... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sync local changes with AutoScaling group. | def update(self):
return self.connection._update_group('UpdateAutoScalingGroup', self) | [
"def aws_update_autoscaler():\r\n ami_id = aws_create_ami_from()\r\n cur_date = time.strftime('%Y%m%d', time.gmtime())\r\n lcName = 'ns11-%s' % cur_date\r\n lc = LaunchConfiguration(name=lcName, \r\n image_id=ami_id, instance_type=env.aws.get('instance_type'),\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete this autoscaling group if no instances attached or no scaling activities in progress. | def delete(self, force_delete=False):
return self.connection.delete_auto_scaling_group(self.name,
force_delete) | [
"def destroy(self):\r\n return self.driver.ex_destroy_group(self)",
"def delete(name, force=False, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n retries = 30\n while True:\n try:\n conn.delete_auto_sca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Suspends Auto Scaling processes for an Auto Scaling group. | def suspend_processes(self, scaling_processes=None):
return self.connection.suspend_processes(self.name, scaling_processes) | [
"def delete(name, force=False, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n retries = 30\n while True:\n try:\n conn.delete_auto_scaling_group(name, force)\n msg = \"Deleted autoscale group {}.\".fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resumes Auto Scaling processes for an Auto Scaling group. | def resume_processes(self, scaling_processes=None):
return self.connection.resume_processes(self.name, scaling_processes) | [
"def resume(self, scaling_group):\r\n # NOTE: This is not yet implemented. The code is based on the docs,\r\n # so it should either work or be pretty close.\r\n return self._manager.resume(scaling_group)",
"def resume(self, scaling_group):\r\n uri = \"/%s/%s/resume\" % (self.uri_base, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the rule with a specified queue to push the tag changes to. | def __init__(self, queue: Queue):
self._queue = queue | [
"def __init__(self, queue):\n self.queue = queue\n super(QueueHandler, self).__init__()",
"def init_queue(self):\n empty_graph = Hybrid_structure(5, 6)\n score = self.__score(empty_graph)\n self._queue[empty_graph] = score",
"def queue(self, queue_):\n self._queue... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes the resource with the rule. The resource will first be checked with 'check_condition' to see if the rule should be run. | def process(self, resource: Resource):
if self.check_condition(resource):
tags = self.get_tags(resource)
payload = {
"resource": resource.to_dict(),
"tags": tags
}
self._queue.push(json.dumps(payload))
return True
... | [
"def run_evaluate(self, instance):\n # return None if not true, or rule_status id if True (for reporting)?\n if not self.active:\n return\n\n hits = 0\n condition = None\n\n for condition in self.condition_set.all():\n result = condition.run_evaluate(instance... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Measures the performance of bigint bitwise binops | def bench_ak_bitwise_binops(benchmark, op):
cfg = ak.get_config()
N = pytest.prob_size * cfg["numLocales"]
a1 = ak.randint(0, 2**32, N, dtype=ak.uint64, seed=pytest.seed)
a2 = ak.randint(0, 2**32, N, dtype=ak.uint64, seed=pytest.seed)
a = ak.bigint_from_uint_arrays([a1, a2], max_bits=pytest.m... | [
"def mul_bin(b1,b2):\r\n\r\n n1 = bin_to_dec(b1) \r\n n2 = bin_to_dec(b2)\r\n\r\n b = dec_to_bin(n1*n2)\r\n\r\n return b",
"def test_nth_bit_set():\n for _ in range(0, 10000):\n number = random.randint(0, 100000000)\n bits = bin(number)[2:]\n for i, b in enumerate(reversed(b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decodes the given one-hot vector into its value. | def decode(self, one_hot_vector):
# TODO: Implement this method
index = 0
for entry in one_hot_vector:
if entry == 1:
return self.tags[index]
else:
index = index + 1 | [
"def decode(self, one_hot_vector):\n # TODO: Implement this method\n for i in range(len(self.tags)):\n if one_hot_vector[i] == 1:\n return self.tags[i]",
"def reverse_one_hot_vector_encoding(self,\n df_one_hot_vector:pd.DataFrame=None)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
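The loop above works, but the same lookup is usually a one-liner; a sketch assuming `self.tags` is index-aligned with the vector:

```python
def decode(self, one_hot_vector):
    # The position of the single hot entry selects the tag;
    # raises ValueError if no entry equals 1.
    return self.tags[list(one_hot_vector).index(1)]
```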
Compute the md5 hash of a file. | def _get_file_md5sum(file_name):
hash_obj = hashlib.md5()
with open(file_name, 'rb') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest().encode('utf-8') | [
"def filehash(file):\n hasher = hashlib.md5()\n f = open(file, 'rb')\n buf = f.read()\n hasher.update(buf)\n return hasher.hexdigest()",
"def compute_md5(file):\n md5 = hashlib.md5()\n while True:\n buf = file.read(8192)\n if not buf:\n break\n md5.update(buf)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
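The helper above reads the whole file into memory at once; for large files, the usual pattern is a chunked update (a sketch, not the original code):

```python
import hashlib

def md5sum_chunked(file_name, chunk_size=8192):
    hash_obj = hashlib.md5()
    with open(file_name, 'rb') as f:
        # iter(..., b'') yields fixed-size chunks until EOF, keeping
        # memory use constant regardless of file size.
        for chunk in iter(lambda: f.read(chunk_size), b''):
            hash_obj.update(chunk)
    return hash_obj.hexdigest()
```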
Mark old IDs as inactive; ensure that new ID users have the same id. Used when a user creates multiple accounts and only wants to keep one. | def deactivate_user(new_id: int, old_ids: list[int]):
# deactivate old user ids, and reassign assessments for current user
new_user = HAWCUser.objects.get(id=new_id)
for old_user in HAWCUser.objects.filter(id__in=old_ids):
for assessment in old_user.assessment_pms.all():
if assessment no... | [
"def set_as_inactive(self):\n with transaction.atomic():\n self.is_member = False\n self.is_secretary = False\n self.is_treasurer = False\n self.is_president = False\n self.is_inactive = True",
"def update_ids(self):\n self.uid += 1",
"def inv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate report on the physical structure of the target SpiNNaker machine. | def generate_machine_report(report_directory, machine, connections):
file_name = report_directory + os.sep + "machine_structure.rpt"
f_machine_struct = None
try:
f_machine_struct = open(file_name, "w")
except IOError:
logger.error("Generate_placement_reports: Can't open file {} for "
... | [
"def writeSegmentDetailsKml(outPath,singleSimulation,nodes):",
"def generate_report(self, report_path: str):\n\n # Dictionary mapping type names to facts. Each type name is mapped\n # to a dictionary which maps sources to a list of facts. This makes\n # organizing the output report easier.\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all the EIA plant IDs associated with a given EIA operator ID. | def eia_operator_plants(operator_id, pudl_engine):
Session = sa.orm.sessionmaker()
Session.configure(bind=pudl_engine)
session = Session()
pudl_plant_ids = [p.plant_id for p in session.query(models.UtilityEIA923).
filter_by(operator_id=operator_id).
first().ut... | [
"def get_mapped_plants_eia():\n mapped_plants_eia = (\n get_plant_map()\n .loc[:, [\"plant_id_eia\", \"plant_name_eia\"]]\n .dropna(subset=[\"plant_id_eia\"])\n .astype({\"plant_id_eia\": int})\n .drop_duplicates(\"plant_id_eia\")\n .sort_values(\"plant_id_eia\")\n )\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate generation vs. expense correlation for FERC Form 1 plants. This function helped us identify which of the expns_ fields in the FERC Form 1 dataset represent production costs, and which are nonproduction costs, for the purposes of modeling marginal cost of electricity from various plants. We expect the differen... | def ferc1_expns_corr(pudl_engine, capacity_factor=0.6):
steam_df = pd.read_sql('SELECT * FROM plants_steam_ferc1', pudl_engine)
steam_df['capacity_factor'] = \
        (steam_df['net_generation_mwh'] / (8760 * steam_df['total_capacity_mw']))  # generation / (hours * capacity)
# Limit plants by capacity factor
steam_df = steam_df[steam_df[... | [
"def fuel_ferc1(testing=False):\n pudl_engine = pudl.db_connect_pudl(testing=testing)\n fuel_ferc1_tbl = pt['fuel_ferc1']\n fuel_ferc1_select = sa.sql.select([fuel_ferc1_tbl, ])\n fuel_df = pd.read_sql(fuel_ferc1_select, pudl_engine)\n\n # We have two different ways of assessing the total cost of fue... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a dataframe with the proportion of generation for each generator. | def generator_proportion_eia923(g):
# Set the datetimeindex
g = g.set_index(pd.DatetimeIndex(g['report_date']))
# groupby plant_id and by year
g_yr = g.groupby([pd.TimeGrouper(freq='A'), 'plant_id', 'generator_id'])
# sum net_gen by year by plant
g_net_generation_per_generator = pd.DataFrame(
... | [
"def initialize_genomes_df():\n # Genomes attribution:\n # Weights for use of Evolving Algorithms to evaluate moves.\n weights = ['w_highest tile',\n 'w_score',\n 'w_number of zeros',\n 'w_potential two step score',\n 'w_distance from right',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract data relevant to the calculation of a power plant's MCOE. Given a PUDL utility_id and a PUDL plant_id, return several data series relevant to the calculation of the plant's marginal cost of electricity (MCOE). Both utility_id and plant_id are required because the same plants are reported by multiple FERC respon... | def mcoe_by_plant(utility_id, plant_id, pudl_engine, years):
# For testing purposes right now...
utility_id = 272 # PSCo's PUDL utility_id
plant_id = 122 # Comanche's PUDL plant_id
# Grab the tables that we're going to need to work with from FERC.
pudl_tables = models.PUDLBase.metadata.tables
... | [
"def get_mfcc_matrices_for_each_speaker(data, winlen=0.025, numcep=13, nfilt=26, nfft=512, appendEnergy=True, delta_=True, deltadelta_=True):\n ret = defaultdict(lambda: [])\n for el in data:\n speaker_id = el[0].split('_')[0]\n digit = el[0].split('_')[1]\n assert(len(speaker_id) == 5 an... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
API endpoint returning a JSON dict representing the result of a call to `utils.has_septic()` URL parameters `address` and `zipcode` are required or the view will report failure. If successful, the result of `has_septic()` is stored as a boolean under the `result` key If an error is caught, it is recorded under the `err... | def check_septic(request, api_version=None):
address = request.GET.get('address')
zipcode = request.GET.get('zipcode')
if not (address and zipcode):
return JsonResponse({
"error": f"Missing one or more required url parameters: address ({address}), zipcode ({zipcode})",
})
d... | [
"def isPincodePresent(arg0, context=None):\n if context.get(\"error\") is True:\n return {\n \"statusCode\": 4001,\n \"statusMessage\": context.get(\"error_response\", \"\")\n }\n response_dict = {\n \"statusCode\": 200,\n \"statusMessage\": \"Success\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test stations by distance function by checking length and validity of output | def test_stations_by_distance(): # Add test for stations_by_distance function
stations = build_station_list() # Create list of stations for testing
p = (0, 0) # p can be anything for the test
output = stations_by_distance(stations, p) # Use the tested function
assert len(output) > 0 # Ensure that i... | [
"def test_stations_within_radius(): # Add test for stations_within_radius function\n stations = build_station_list() # Create list of stations for testing\n centre = (53, -1) # Put the centre (roughly) in the middle of the UK\n # (according to the data from DEFRA, the extent of the stations is between\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test stations_within_radius function by having it find all the stations within a large radius and ensuring the output is all of the stations | def test_stations_within_radius(): # Add test for stations_within_radius function
stations = build_station_list() # Create list of stations for testing
centre = (53, -1) # Put the centre (roughly) in the middle of the UK
# (according to the data from DEFRA, the extent of the stations is between
# Lat... | [
"def stations_within_radius(stations, centre, r):\n\n # getting sorted stations and carrying forward exceptions\n try:\n sorted_stations = stations_by_distance(stations, centre)\n except Exception as e:\n raise e\n\n # validation for r\n if type(r) != float and type(r) != int:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test plot_on_map function by ensuring it gives some output | def test_plot_on_map():
list_of_stations = build_station_list() # Create list of stations to test from
assert plot_on_map(list_of_stations) != 0 # Unsure what the output of this function will
# look like, but should be non-zero (i.e. some output). | [
"def test_plotting_functions_with_cmaps(plot_func, cmap):\n plot_func(load_mni152_template(resolution=2), cmap=cmap, colorbar=True)\n plt.close()",
"def test_plot_error_map_backend_v1(self):\n backend = FakeKolkata()\n img_ref = path_to_diagram_reference(\"kolkata_error.png\")\n fig = p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setter method for value, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config/value (uint32) | def _set_value(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
... | [
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_rstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, registe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setter method for flooding_type, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config/flooding_type (mpls-srlg-flooding-type) | def _set_flooding_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
... | [
"def _set_lsp_type_bypass(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-type-bypass\", rest_name=\"lsp-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_path... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the event(s) associated with the `address_id` given. | def get_by_address_id(
self,
address_id: UUID,
*,
raise_ex: bool = False
) -> tp.List[T]:
pass | [
"def get_event_detail(self, event_id):\n events = self.facebook.get_object(cat=\"single\", id=event_id, fields=['description', 'name', 'place', 'attending'])\n return events.get('description', None), events.get('name', None), \\\n events.get('place', None), events.get('attending', None)"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Represent a location given its id | def shn_gis_location_represent(id, showlink=True):
table = db.gis_location
try:
location = db(table.id == id).select(table.id,
table.name,
table.level,
table.parent,
... | [
"def test_facebook_get_location_by_id(self):\n location = get_location_info(location_id=120491747748)\n\n self.assertEqual(location.name, 'The Classic Cup')\n self.assertEqual(location.category, 'Restaurant/cafe')\n self.assertEqual(location.country, 'United States')\n self.assert... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper for gis_config prep and others where gis_config is a component. Hide location hierarchy fields above max allowed. Table definitions may include more levels than a particular site wants to allow. Rather than changing the definitions, hide the extra levels. Set defaults from the site config static defaults are spa... | def gis_config_prep_helper(r):
table = db.gis_config
table_max_level_num = int(reduce(
max, filter(lambda field: len(field) == 2 and field[0] == "L",
table.fields))[1])
if table_max_level_num > gis.max_allowed_level_num:
for n in range(gis.max_allowed_level_num ... | [
"def HideDefaults(self, name, cfg):\n my_default = self.GetDefault()\n my_default['name'] = name\n\n template = cfg.get('_template')\n if template:\n my_default.update(self._templates[template])\n my_default['_template'] = None\n\n d = {}\n for k, v in cfg.iteritems():\n if my_defau... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
On Accept for GIS Locations (after DB I/O) | def gis_location_onaccept(form):
if session.rcvars and hasattr(name_dummy_element, "onaccept"):
# HTML UI, not XML import
name_dummy_element.onaccept(db, session.rcvars.gis_location, request)
else:
location_id = form.vars.id
table = db.gis_location_name
names = db(table.l... | [
"def location():\n\n resource = request.function\n tablename = module + \"_\" + resource\n table = db[tablename]\n\n # Allow prep to pass vars back to the controller\n vars = {}\n \n # Pre-processor\n def prep(r, vars):\n\n # Restrict access to Polygons to just MapAdmins\n if d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
On Validation for GIS Locations (before DB I/O) | def gis_location_onvalidation(form):
# If you need more info from the old location record, add it here.
# Check if this has already been called and use the existing info.
def get_location_info():
if "id" in request:
query = (db.gis_location.id == request.id)
return db(query)... | [
"def validate_location():\n location = request.args.get('location')\n\n try:\n possible_locations = bundle_location_data(location)\n except NoLocationResultsError as e:\n return jsonify({'error': e.message})\n\n return jsonify(possible_locations)",
"def validateLocation(location: str) ->... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of Parents for a Location | def s3_gis_location_parents(r, **attr):
resource = r.resource
table = resource.table
# Check permission
if not s3_has_permission("read", table):
r.unauthorised()
if r.representation == "html":
# @ToDo
output = dict()
#return output
raise HTTP(501, body=s3x... | [
"def get_location_children(client: GraphqlClient, location_id: str) -> List[Location]:\n result = LocationChildrenQuery.execute(client, id=location_id)\n locations = result.location.children\n\n if len(locations) == 0:\n return []\n\n return [\n Location(name=location.name, id=location.id,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes a panorex using internal attribute arch. | def compute_panorex(self):
return self.arch_handler.create_panorex(self.arch) | [
"def update(self, arch=None):\n if arch is not None:\n self.set_arch(arch)\n self.set_panorex(self.compute_panorex())",
"def _setup_x86_arch(self):\n arch_mode = self.binary.architecture_mode\n\n # Set up architecture information\n self.arch_info = X86ArchitectureInfo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the arch with a new set of points, then recomputes panorex. | def update(self, arch=None):
if arch is not None:
self.set_arch(arch)
self.set_panorex(self.compute_panorex()) | [
"def updatePoints(self, x, y):",
"def compute_panorex(self):\n return self.arch_handler.create_panorex(self.arch)",
"def update_base_arch(self, param_set):\n for i in range(len(self.pc_arg_val)):\n self.base_arch.config_label[self.param_set_labels[i]] = param_set[i]\n self.pc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Applies offset to this Arch object | def offset(self, amount):
offsetted_arch = apply_offset_to_arch(self.arch, amount, self.poly[0])
self.update(offsetted_arch) | [
"def update_offset(self, new_offset):\r\n self.offset = new_offset",
"def offset(self, value):\n self._offset = value",
"def test_offset_adjustment(self):\n arch = get_arch('arm')\n obj1 = ObjectFile(arch)\n obj1.get_section('code', create=True).add_data(bytes(59))\n obj2 =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a copy of this Arch object | def copy(self):
arch = self.arch.copy()
return Arch(self.arch_handler, arch) | [
"def copy(self):\n copy = Atom(atom_type=self.type,\n position=self.position,\n if_pos=self.if_pos,\n charge=self.charge) # add more if necessary\n # carries no neighbors, hybridization, etc\n return copy",
"def create_copy(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the additional arguments of test's process. | def SetTestArgs(self, args):
if not args:
return
# The generated xctest is always empty. So set it directly.
self.SetXctestrunField('CommandLineArguments', args) | [
"def set_arguments(self):\r\n pass",
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.run, 1, 'Execute run on remote server (to be used with --client argument)', action='store_true')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the additional environment variables of app under test's process. | def SetAppUnderTestEnvVars(self, env_vars):
if not env_vars:
return
if self._test_type == ios_constants.TestType.XCUITEST:
key = 'UITargetAppEnvironmentVariables'
else:
key = 'EnvironmentVariables'
aut_env_vars = self.GetXctestrunField(key)
if not aut_env_vars:
aut_env_vars =... | [
"def prepare_environment_variables(self):\n for env_variable in self.environment_variables:\n for k, v in env_variable.iteritems():\n os.environ[k] = v",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def set_envs(self):\n for k, v i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the additional arguments of app under test's process. | def SetAppUnderTestArgs(self, args):
if not args:
return
if self._test_type == ios_constants.TestType.XCUITEST:
key = 'UITargetAppCommandLineArguments'
else:
key = 'CommandLineArguments'
self.SetXctestrunField(key, args) | [
"def set_script_args(self, args):\n global _env_args_str\n argstr = pickle.dumps(args)\n os.environ[_env_args_str] = argstr",
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.ru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the specific test methods/test classes to skip in xctestrun file. | def SetSkipTests(self, skip_tests):
if not skip_tests:
return
self.SetXctestrunField('SkipTestIdentifiers', skip_tests) | [
"def test_skip(self):\n LOG.info('About to skip...')\n self.skipTest('No reason.')",
"def pytest_ignore(cls):\n cls.__test__ = False\n return cls",
"def test_classes_and_functions_excluded(self, flake8dir):\n\n # Setup\n flake8dir.make_setup_cfg(\"\"\"\n[flake8]\npytest_mark1 =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the specific field in the xctestrun file. | def GetXctestrunField(self, field):
try:
return self._xctestrun_file_plist_obj.GetPlistField(
'%s:%s' % (self._root_key, field))
except ios_errors.PlistError:
return None | [
"def get_field(self, x, y):\n return self.fields[x][y]",
"def get_field(self, field_name):\n\n field_names = field_name.split('.')\n return _find_field(self.__msg, field_names)",
"def get_field(self, field):\n value = self._fields.get(field)\n if value is None:\n ms... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the specific field is in the xctestrun file. | def HasXctestrunField(self, field):
try:
self._xctestrun_file_plist_obj.GetPlistField(
'%s:%s' % (self._root_key, field))
return True
except ios_errors.PlistError:
return False | [
"def hasContent(field):\n file = field.file\n pos = file.tell()\n file.seek(0)\n ch = file.read(1)\n file.seek(pos)\n return ch != ''",
"def check_field_exists(curdir, fieldname):\n # TODO replace all those os.path.exists() calls by\n # check_fie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the field with provided value in xctestrun file. | def SetXctestrunField(self, field, value):
self._xctestrun_file_plist_obj.SetPlistField(
'%s:%s' % (self._root_key, field), value) | [
"def set(self, field, value):\n raise NotImplementedError",
"def set_value(self, config_field, include_doc=False):\n raise NotImplementedError",
"def set_field(self, field):\n return self.set_param('field', field)",
"def track_set(field_name, val, msg=track):\r\n success = util... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes the field with provided value in xctestrun file. | def DeleteXctestrunField(self, field):
self._xctestrun_file_plist_obj.DeletePlistField(
'%s:%s' % (self._root_key, field)) | [
"def delete_field(self):\n self.exec_command(b'DeleteField')",
"def delete_field(self, name):\n if 'idb_fields' in self.data:\n self.data['idb_fields'].remove(name)\n if name in self.data:\n del self.data[name]",
"def removeField(field):",
"def field_delete(self, cor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the XctestRun object. If arg work_dir is provided, the original app under test file and test bundle file will be moved to work_dir/TEST_ROOT. | def __init__(self, app_under_test_dir, test_bundle_dir,
sdk=ios_constants.SDK.IPHONESIMULATOR,
test_type=ios_constants.TestType.XCUITEST,
signing_options=None, work_dir=None):
self._app_under_test_dir = app_under_test_dir
self._test_bundle_dir = test_bundle_dir
s... | [
"def __init__(self,kim_code,*args,**kwargs):\n super(TestDriver,self).__init__(kim_code,*args,**kwargs)\n self.executable = os.path.join(self.path, cf.TEST_EXECUTABLE)",
"def testWorkingDir(self):\n\n os.environ.pop(\"TUNE_ORIG_WORKING_DIR\", None)\n working_dir = os.getcwd()\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the xctestrun file for XCUITest. The approach is creating a dummy project. Run 'buildfortesting' with the dummy project. Then the xctestrun file and XCTRunner app template will be under the build products directory of dummy project's derived data dir. | def _GenerateXctestrunFileForXcuitest(self):
dummyproject_derived_data_dir = os.path.join(self._work_dir,
'dummyproject_derived_data')
with dummy_project.DummyProject(
self._app_under_test_dir, self._test_bundle_dir, self._sdk,
self._test_type, se... | [
"def _GenerateXctestrunFileForXctest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the xctestrun file for XCTest. The approach is creating a dummy project. Run 'buildfortesting' with the dummy project. Then the xctestrun file will be under the build products directory of dummy project's derived data dir. | def _GenerateXctestrunFileForXctest(self):
dummyproject_derived_data_dir = os.path.join(self._work_dir,
'dummyproject_derived_data')
with dummy_project.DummyProject(
self._app_under_test_dir, self._test_bundle_dir, self._sdk,
self._test_type, self... | [
"def _GenerateXctestrunFileForXcuitest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._tes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the xctestrun file for Logic Test. The approach is setting on xctestrun.plist directly and using `xctest` tool as the test host of the logic test bundle. | def _GenerateXctestrunFileForLogicTest(self):
self._xctestrun_file_path = os.path.join(
self._test_root_dir, 'xctestrun.plist')
test_bundle_name = os.path.basename(self._test_bundle_dir).split('.')[0]
plist_util.Plist(self._xctestrun_file_path).SetPlistField(
test_bundle_name, {})
self._... | [
"def _GenerateXctestrunFileForXctest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Moves the file under target directory and replace it if it exists. | def _MoveAndReplaceFile(src_file, target_parent_dir):
new_file_path = os.path.join(
target_parent_dir, os.path.basename(src_file))
if os.path.exists(new_file_path):
shutil.rmtree(new_file_path)
shutil.move(src_file, new_file_path)
return new_file_path | [
"def move_file(source, target):\n log.echo_info('move [%s] to [%s]' % (source, target))\n check_file_exists(source)\n shutil.move(source, target)\n log.echo_info('moving...')\n check_file_exists(target)",
"def _movefile(src, dest, **kwargs):\n\tif movefile(src, dest, **kwargs) is None:\n\t\traise p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a dictionary of a particular binding. The keys are camel cased binding field names defined in `init_params` list and | def get_dict_repr(self) -> Dict:
params = list(dict.fromkeys(getattr(self, 'init_params', [])))
for p in params:
if p not in Binding.EXCLUDED_INIT_PARAMS:
self._dict[to_camel_case(p)] = getattr(self, p, None)
return self._dict | [
"def bind_params(self) -> Dict[str, Any]:\n return {}",
"def _make_port_dict(self, port, fields=None):\n\n if not fields:\n port.update(self.base_binding_dict)\n else:\n for key in self.base_binding_dict:\n if key in fields:\n port.updat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the value of a particular setting attribute. | def get_settings_value(self, settings_attribute_key: str) -> Optional[str]:
return self.get_dict_repr().get(settings_attribute_key) | [
"def get_attribute_value(self, attribute_name):\n return self.attributes[attribute_name]",
"def get(self, setting):\n return self.settings.get(setting, \"\")",
"def get_setting(\n self,\n setting_name: webenginecore.webenginesettings.WebAttributeStr,\n ) -> bool:\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a matrix indicating the master element for a given node | def generate_node_master_elem(self):
self.node_master_elem = np.zeros((self.num_node, 2), dtype=ct.c_int, order='F') - 1
for i_elem in range(self.num_elem):
for i_node_local in range(self.elements[i_elem].n_nodes):
if self.master[i_elem, i_node_local, 0] == -1:
... | [
"def get_inner_matrix(self):\n return self.matrix",
"def get_membership_matrix(self):\n import numpy as np\n matrix = []\n for i in self.clusters:\n matrix.append(self.clusters[i]['indicator'])\n matrix = np.array(matrix)\n return matrix",
"def get_adjacency_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
bits is a hex string; returns a table mapping bit lengths to codes | def readACTable(bits):
table = {}
length = bits[:4]
tc = bits[4]
th = bits[5]
bits = bits[6:]
codeCounts = [0]*16
for i in range(16):
codeCounts[i] = int(bits[:2], 16)
bits = bits[2:]
for i in range(16):
numCodes = codeCounts[i]
table[i+1] = []
for _ in range(numCodes):
tabl... | [
"def parseBits(self, hexcode, width):\n bitarray = []\n for byte in hexcode[::-1]:\n bits = int(byte, 16)\n for x in range(4):\n bitarray.append(bool((2 ** x) & bits))\n bitarray = bitarray[::-1]\n return enumerate(bitarray[:width])",
"def string_to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
table is a table of bit lengths and corresponding code words; root is the root of the resulting Huffman tree; huffman is a dictionary mapping codes to bits | def createHuffmanTree(table):
root = HuffmanNode()
leaves = Queue.Queue()
root.left = HuffmanNode(root, 0)
root.right = HuffmanNode(root, 1)
leaves.put(root.left)
leaves.put(root.right)
huffman = {}
for key in range(1, len(table.keys())+1):
for code in table[key]:
leaf = leaves.get()
le... | [
"def huffman_code(node, code = ''):\n \n if node.get_word() != None:\n # Leaf reached\n return {node.get_word() : code}\n child1, child2 = node.get_children()\n tree = {}\n tree.update(huffman_code(child1, code + '1'))\n tree.update(huffman_code(child2, code + '0'))\n return tree"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
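The breadth-first leaf assignment above should produce the same mapping as canonical Huffman code assignment for JPEG tables; a compact sketch with hypothetical symbols:

```python
# table maps bit length -> list of symbols, as produced by readACTable.
table = {1: [], 2: ['05'], 3: ['01', '02']}
code, huffman = 0, {}
for length in sorted(table):
    for symbol in table[length]:
        # Codes of one length are consecutive integers.
        huffman[format(code, '0{}b'.format(length))] = symbol
        code += 1
    code <<= 1  # moving to the next bit length appends a 0 bit
print(huffman)  # {'00': '05', '010': '01', '011': '02'}
```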
Remove duplicate data where ``sort_col BETWEEN lower AND upper``. | def remove_duplicate(self, lower, upper, _raise_error=False):
self.drop_temp_table()
self.create_temp_table()
with self.engine.begin() as connection:
connection.execute(self.sql_insert_dupe_ids(lower, upper))
connection.execute(self.sql_insert_distinct_copy(lower, upper))... | [
"def _tidy(self) -> None:\n if self.no_overlap:\n self.remove_overlap(self.no_contiguous) # will sort\n else:\n self._sort()",
"def select_sort2(data):\n if len(data) <= 1:\n return data\n\n for i in range(len(data) - 1):\n max_idx = len(data) - 1 - i\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return min / max value in sort key. | def sort_key_min_max(self):
sql = select([func.min(self.sort_col), func.max(self.sort_col)])
results = self.engine.execute(sql).fetchall()
if len(results) == 1:
min_value, max_value = results[0]
return min_value, max_value
else:
raise ValueError | [
"def min_max_keys(d):\n\n # thinking to change the key list to an array, sort it, then take 0 element\n # for key_min and -1 for key_max\n\n if (len(d) > 0):\n key_list = [key for key in d.keys()]\n key_list.sort()\n return (key_list[0], key_list[-1])\n else:\n return ()",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all request names that haven't been tested by a uat job. | def get_untested(cls) -> List:
return db.session.query(RequestName). \
filter(
RequestName.uat_job_id == None # pylint: disable=singleton-comparison # noqa: E711;
).all() | [
"def get_unverified(cls) -> List:\n return db.session.query(RequestName). \\\n filter(\n RequestName.auto_analyse_result != RequestName.Results.ERROR.value,\n RequestName.uat_result == None # pylint: disable=singleton-comparison # noqa: E711;\n ).all()",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all request names that haven't been tested by a uat job. | def get_unverified(cls) -> List:
return db.session.query(RequestName). \
filter(
RequestName.auto_analyse_result != RequestName.Results.ERROR.value,
RequestName.uat_result == None # pylint: disable=singleton-comparison # noqa: E711;
).all() | [
"def get_untested(cls) -> List:\n return db.session.query(RequestName). \\\n filter(\n RequestName.uat_job_id == None # pylint: disable=singleton-comparison # noqa: E711;\n ).all()",
"def get_unexposed_user_log_names(self):\n raise SkipTest(\"No unexposed user l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
run select.select, with a timeout specified as a datetime object | def select_timeout(timeout, rlist=[], wlist=[], xlist=[]):
delta = timeout - datetime.datetime.now()
if delta.days >= 0:
assert(delta.days == 0) # unimplemented, and insane!
secs = delta.seconds + delta.microseconds / 1000000.0
assert(secs > 0)
return select.select(rlist, wlist, ... | [
"def doSelect(nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', usertimeout: 'timeval *') -> \"int\":\n return _coin.SoDB_doSelect(nfds, readfds, writefds, exceptfds, usertimeout)",
"def SoDB_doSelect(nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', usertimeou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
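A usage sketch for the deadline-based wrapper above (hypothetical socket; note the timeout is an absolute `datetime`, not a duration):

```python
import datetime
import socket

sock = socket.socket()
deadline = datetime.datetime.now() + datetime.timedelta(seconds=5)
# Blocks until sock is readable or the absolute deadline passes.
readable, _, _ = select_timeout(deadline, rlist=[sock])
```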
Parse mssql banner information | def parse_banner_mssql(self, task: IscoutTask, level: int,
portinfo: PortInfo, resultfi: str):
try:
if not os.path.isfile(resultfi):
self._logger.error(
f"Resultfi not exists:\ntaskid:{task.taskid}\nbatchid:{task.batchid}\nresultfi:{resu... | [
"def metadata(soup):\n header = soup.find('div', {\"class\":\"stationTextHeader\"}).text.strip()\n return header.split('\\n')[:-1]",
"def _convert_tvdb_tvshow_metadata(tvdb_show, imdb_id, banners=True, language=\"en\"):\n info = {}\n if tvdb_show is None:\n return info\n info['tvdb_id'] = st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the warehouse_unittest database, builds the schema and returns an SQLALchemy Connection to the database. | def database(request):
if os.getenv('WAREHOUSE_DATABASE_URL'):
# Assume that the database was externally created
url = os.getenv('WAREHOUSE_DATABASE_URL')
else:
# (Drop and) create the warehouse_unittest database with UTF-8 encoding
# (in case the default encoding was cha... | [
"def create_test_db(self):\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n self.slave = self.engine\n self.metadata = Metadata()\n self.create_db()\n self.reset_db()",
"def test_create_database(self):\n\n # Setup the tables\n CreateDatabase.run(a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A helper function that takes a query and returns a function that will query the database and return a scalar. | def scalar(query, default=None):
def inner(model, *args, **kwargs):
val = model.engine.execute(query, *args, **kwargs).scalar()
if default is not None and val is None:
return default
else:
return val
return inner | [
"def first(query, default=None):\r\n def inner(model, *args, **kwargs):\r\n val = model.engine.execute(query, *args, **kwargs).first()\r\n\r\n if default is not None and val is None:\r\n return default\r\n else:\r\n return val\r\n\r\n return inner",
"def sql_query(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A helper function that takes a query and returns a function that will query the database and return the first row | def first(query, default=None):
def inner(model, *args, **kwargs):
val = model.engine.execute(query, *args, **kwargs).first()
if default is not None and val is None:
return default
else:
return val
return inner | [
"def query_and_return_the_first_row_where(statement):\n\n db = current.db\n s3db = current.s3db\n\n cmd = \"db(%s).select(\\\n limitby=(0,1) ).first()\" % statement\n logger.info(\"Executing query %s\" % cmd)\n\n output = eval(cmd)\n return output",
"def first(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A helper function that takes a query and returns a function that will query the database and return a list of rows with the row_func applied to each. | def rows(query, row_func=dict):
def inner(model, *args, **kwargs):
return [row_func(r) for r in
model.engine.execute(query, *args, **kwargs)]
return inner | [
"def run_query(conn, query):\n with conn.cursor(as_dict=True) as cursor:\n cursor.execute(query)\n for row in cursor:\n yield row",
"def get_results(query):\n with psycopg2.connect('dbname=news') as conn:\n cur = conn.cursor()\n cur.execute(query)\n return cur.f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A helper function that takes a query, a key_func, and a value_func and will | def mapping(query, key_func=lambda r: r[0], value_func=lambda r: r[1]):
def inner(model, *args, **kwargs):
return {
key_func(r): value_func(r)
for r in model.engine.execute(query, *args, **kwargs)
}
return inner | [
"def compose_keys(f, g):\n return lambda v: f(g(v))",
"def makeKeyGetter( k ):\n def myFunc( v ):\n return k( v[1] )\n print('making key getter for k=', k)\n return myFunc",
"def _create_getter(dct, lst, func):\n def _wrapper():\n return dct, lst, func\n return _wrapper",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
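A usage sketch of these helper factories (hypothetical model and SQL): each factory returns a plain function whose first argument is the model, so the result can be assigned directly as a method on a model class:

```python
class ProjectModel:
    def __init__(self, engine):
        self.engine = engine

    # Hypothetical queries, for illustration only.
    get_download_count = scalar(
        "SELECT downloads FROM projects WHERE name = %s", default=0)
    get_project = first(
        "SELECT * FROM projects WHERE name = %s")
    all_names = rows(
        "SELECT name FROM projects", row_func=lambda r: r[0])
```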
Validate that the keys of the argument_dict passed match columns in table that are not in the blacklist list. return TypeError if there is a key where this condition is not met. | def validate_argument_column_mapping(argument_dict, table,
blacklist=None):
if blacklist is None:
blacklist = []
columns = set((c.key for c in table.columns if c.key not in blacklist))
for argument_name in argument_dict:
if argument_name not in colu... | [
"def _check_field_mappings(\n column_names: List[str],\n feature_table_name: str,\n feature_table_timestamp_column: str,\n feature_table_field_mappings: Dict[str, str],\n) -> None:\n\n if feature_table_timestamp_column not in column_names:\n raise ValueError(\n f\"Provided data sour... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Cache-Control general-header field is used to specify directives that MUST be obeyed by all caching mechanisms along the request/response chain. | def surrogate_control(self):
def on_update(surrogate_control):
if not surrogate_control and "surrogate-control" in self.headers:
del self.headers["surrogate-control"]
elif surrogate_control: # pragma: no cover
self.headers["Surrogate-Control"] = \
... | [
"def cachecontrolheaders():\n t = get_template(\"templates/cachecontrol.html\")\n htmlcontent = t.render(Context({}))\n return(htmlcontent)",
"def _patch_header(response: HttpResponse, status: Status) -> None:\n # Patch cache-control with no-cache if it is not already set.\n if status == Status.SKI... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dump the last N days' updates as an RSS feed. | def rss(app, request):
releases = app.db.packaging.get_recently_updated(num=40)
for release in releases:
# TODO update _force_external to _external when Flask-ification is done
url = url_for(request, 'warehouse.packaging.views.project_detail',
project_name=release['nam... | [
"def rss(app, request):\n releases = app.db.packaging.get_recently_updated(num=40)\n for release in releases:\n # TODO update _force_external to _external when Flask-ification is done\n url = url_for(request, 'warehouse.packaging.views.project_detail',\n project_name=release... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dump the last N days' new projects as an RSS feed. | def packages_rss(app, request):
releases = app.db.packaging.get_recent_projects(num=40)
for release in releases:
# TODO update _force_external to _external when Flask-ification is done
url = url_for(request, 'warehouse.packaging.views.project_detail',
project_name=rele... | [
"def packages_rss(app, request):\n releases = app.db.packaging.get_recent_projects(num=40)\n for release in releases:\n # TODO update _force_external to _external when Flask-ification is done\n url = url_for(request, 'warehouse.packaging.views.project_detail',\n project_name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. | def run_migrations_offline():
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations() | [
"def run_migrations_offline():\r\n url = config.get_main_option(\"sqlalchemy.url\")\r\n context.configure(compare_type=True, url=url)\r\n\r\n with context.begin_transaction():\r\n context.run_migrations()",
"def run_migrations_offline():\n context.configure(\n url=url,\n target_me... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
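For context, this offline code path is what runs when Alembic is invoked with the --sql flag (for example, alembic upgrade head --sql), which emits the migration SQL to standard output instead of executing it against a live connection.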
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. | def run_migrations_online():
options = config.get_section(config.config_ini_section)
url = options.pop("url")
engine = create_engine(url, poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
... | [
"def __run_migrations_online() -> None:\n connectable: Engine = engine_from_config(\n config.get_section(config.config_ini_section), prefix=\"sqlalchemy.\", poolclass=pool.NullPool,\n )\n\n with connectable.connect() as connection: # type: Connection\n context.configure(connection=connection... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A simple helper that takes an app, request, template, and some context and constructs a TemplateResponse that will lazily render the template with the given context when the Response is evaluated. | def render_response(app, request, template, **context):
template = app.templates.get_template(template)
default_context = {
"config": app.config,
"csrf_token": functools.partial(helpers.csrf_token, request),
"gravatar_url": helpers.gravatar_url,
"static_url": functools.pa... | [
"def render(self, template, qcontext=None, lazy=True, **kw):\n response = Response(template=template, qcontext=qcontext, **kw)\n if not lazy:\n return response.render()\n return response",
"def on_template_response(self, context, **kwargs):\n request = kwargs.setdefault(\"re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a response object (a WSGI application) that, if called, redirects the client to the target location. Supported codes are 301, 302, 303, 305, and 307. 300 is not supported because it's not a real redirect, and 304 is not supported because it's the answer for a request with defined If-Modified-Since headers. | def redirect(location, code=302):
display_location = escape(location)
if isinstance(location, str):
location = iri_to_uri(location)
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h... | [
"def redirect(self, urls, code=None):\n return redirect(self.request, self.response, urls, code=code)",
"def process_response(self, request, response):\n if not self.redirected and response.status_code == 302: # This is a redirect\n referer = request.META.get('HTTP_REFERER')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
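A usage sketch via Werkzeug's bundled equivalent, werkzeug.utils.redirect, which the truncated function above mirrors:

    from werkzeug.utils import redirect

    resp = redirect("https://example.com/next", code=303)
    print(resp.status_code)          # 303
    print(resp.headers["Location"])  # https://example.com/next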
Normalizes a package name as per PEP 426 | def normalize_project_name(name):
name = re.sub("_", "-", name).lower()
if not PACKAGE_REGEX["permitted_characters"].match(name):
raise ValueError("name contains illegal characters! (See PEP-426)")
if not (PACKAGE_REGEX["start_with_alphanumeric"].match(name) and
PACKAGE_REGEX["end_w... | [
"def package_name(self, name: str) -> str:\n\n if name in self.package_aliases:\n return self.package_aliases[name]\n\n if not name:\n return name\n\n return \".\".join(\n self.package_aliases.get(part) or self._package_name(part)\n for part in name.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
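A worked illustration of the underscore-to-hyphen and case-folding step; the PACKAGE_REGEX character checks from the truncated body are omitted here:

    import re

    for raw in ("Flask_SQLAlchemy", "Django", "zope_interface"):
        print(re.sub("_", "-", raw).lower())
    # flask-sqlalchemy
    # django
    # zope-interface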
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to a different host and uses a safe scheme). Always returns ``False`` on an empty url. | def is_safe_url(url, host):
if not url:
return False
parsed = urllib.parse.urlparse(url)
return ((not parsed.netloc or parsed.netloc == host) and
(not parsed.scheme or parsed.scheme in ["http", "https"])) | [
"def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc",
"def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
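A few examples of the safety check in action (the host value is hypothetical):

    host = "example.com"
    print(is_safe_url("/dashboard", host))                # True: relative path
    print(is_safe_url("https://example.com/next", host))  # True: same host
    print(is_safe_url("https://evil.com/phish", host))    # False: different host
    print(is_safe_url("javascript:alert(1)", host))       # False: unsafe scheme
    print(is_safe_url("", host))                          # False: empty url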
Pop the key-value pair from the front of the dictionary. | def pop_front(self):
return self.dict.popitem(last=False) | [
"def popitem(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n heap[0] = end\n position[end.key] = 0\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
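The method assumes self.dict is a collections.OrderedDict; the underlying behavior is simply:

    from collections import OrderedDict

    d = OrderedDict([("a", 1), ("b", 2)])
    print(d.popitem(last=False))  # ('a', 1): the oldest entry comes out first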
Insert a key-value pair at the end of the dictionary. If the key already exists, the key-value pair will be moved to the end of the dictionary and the value will be updated. | def insert_end(self, key, value):
if key in self.dict:
self.dict.pop(key)
self.dict[key] = value
if len(self.dict) > self.maxsize:
self.dict.popitem(last=False) | [
"def insert(self, key, value):\n # hash the key and map that hash to a bucket\n hash_key = self.hash_function(key) % len(self.buckets)\n\n bucket = self.buckets[hash_key]\n\n for i, val in enumerate(bucket):\n # check if exists, and override if so\n if val[0] == key... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
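Together with pop_front above, this gives a small bounded mapping with move-to-end semantics. A standalone trace of the same logic, with a hypothetical maxsize of 2:

    from collections import OrderedDict

    cache, maxsize = OrderedDict(), 2
    for key, value in [("a", 1), ("b", 2), ("a", 10), ("c", 3)]:
        if key in cache:
            cache.pop(key)               # re-inserting moves the key to the end
        cache[key] = value
        if len(cache) > maxsize:
            cache.popitem(last=False)    # evict the oldest entry
    print(list(cache.items()))  # [('a', 10), ('c', 3)]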
Delete an entry from the dictionary | def delete(self, key):
del self.dict[key] | [
"def _safe_delete(self, my_dictionary: Dict[Any, Any], key: Any) -> None:\n if key in my_dictionary:\n del my_dictionary[key]",
"def remove_item(self, key, value):\n ...",
"def delete(self,key):\n\t\tdel self.form_dict[key]",
"def safe_delete(mydict, key):\n if key in mydict:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts the training asynchronously using the Flask executor. It runs the training based on the DSI_EXECUTE_ON environment variable and, at the end, removes the future from the executor. | def start_training():
logging.getLogger(__name__).info("Training execution started...")
# noinspection PyBroadException
try:
environment = execution_environment()
if environment == DSI_EXECUTE_ON_LOCAL:
if dvc_remote():
train(dvc_data_repo=dvc_remote(), dvc_ssh_us... | [
"def do_training():\n train_cls = Train()\n train_cls.run()",
"async def start_background_tasks(app):\n await license_init(app)\n await matlab_starter(app)",
"def main():\n\n config = SimCLRConfig.parse_arguments()\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in config.gpu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download the trained model, if any, or report 404 when the model is not available. | def get_model():
if _executor.futures.running(TRAINING_KEY):
return jsonify({'error': "Model is not ready"}), 404
model_path = "{0}".format(GIT_COMMIT)
if os.path.exists(model_path):
file = open(model_path, 'rb')
return send_file(filename_or_fp=file,
mimetyp... | [
"def check_model():\n\n if not os.path.exists(MODEL_PICKLE_PATH):\n print('[*] Beginning model download from', MODEL_PICKLE_REMOTE_URL)\n download_file(MODEL_PICKLE_REMOTE_URL, MODEL_PICKLE_PATH)",
"def maybe_download():\r\n\r\n print(\"Downloading Inception 5h Model ...\")\r\n download.may... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reorders x_in based on prev hyp ids. | def ReOrderHyps(x_in):
if isinstance(x_in, tf.Tensor) and x_in.shape.ndims > 0:
# For rank > 1 tensors we make use of an efficient matmul based gather
# on tpu that takes in account the range of the values. For R1, we
# rely on the tf.gather and xla to optimize it efficiently for R1
... | [
"def reorder_series_by_ids(self, neworder, *, inplace=False):\n if inplace:\n out = self\n else:\n out = self.copy()\n\n neworder = [self.series_ids.index(x) for x in neworder]\n\n oldorder = list(range(len(neworder)))\n for oi, ni in enumerate(neworder):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
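A minimal sketch of the rank-1 case, where reordering by previous hypothesis ids reduces to a plain tf.gather (the values are made up):

    import tensorflow as tf

    scores = tf.constant([10.0, 20.0, 30.0])
    old_hyp_ids = tf.constant([2, 0, 1])
    print(tf.gather(scores, old_hyp_ids).numpy())  # [30. 10. 20.]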
Use short_seq optimization when cur_step is smaller than limit. | def LoopContinueShort(cur_step, all_done, unused_step_ids,
unused_core_bs_states, unused_other_states_list):
return tf.math.logical_and(cur_step < p.short_seq_limit,
tf.math.logical_not(all_done)) | [
"def create_short_sequence():\n\n return final_sequences('short')",
"def first_500(seq):\n\n return seq[:500]",
"def shortest_seq(seq,threshold,dbg=False):\n assert None != seq\n assert None != threshold\n assert len(seq) > 0\n s_cur,s_min = 0,0 # current sum and minimum sum found.\n ix,ix_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
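A self-contained sketch of how such a predicate behaves inside tf.while_loop; short_seq_limit is assumed to be 5 here, and the real loop state is far richer than a single flag:

    import tensorflow as tf

    def cond(step, all_done):
        return tf.math.logical_and(step < 5, tf.math.logical_not(all_done))

    def body(step, all_done):
        return step + 1, tf.equal(step + 1, 3)  # pretend decoding finishes at step 3

    step, done = tf.while_loop(cond, body, [tf.constant(0), tf.constant(False)])
    print(int(step))  # 3: the loop exits as soon as all_done flips to True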
Util for getting a nested structure of shapes from a structure of tensors. | def _GetShapes(tensors, none_shapes=False):
shapes = []
for t in tf.nest.flatten(tensors):
shape = t.get_shape() if isinstance(t, tf.Tensor) else None
if none_shapes:
if shape:
shapes.append(tf.TensorShape([None] * len(shape)))
else:
shapes.append(tf.TensorShape(None))
else:
... | [
"def nestshape(data):\n import ubelt as ub\n\n def _recurse(d):\n try:\n import torch\n except ImportError:\n torch = None\n if isinstance(d, dict):\n return ub.odict(sorted([(k, _recurse(v)) for k, v in d.items()]))\n\n clsname = type(d).__name__\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns ranks in [0, len(x)) | def compute_ranks(x):
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks | [
"def compute_ranks(self, x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks",
"def _columnRanks(u):\r\n\r\n out = np.zeros(u.shape)\r\n for j in np.arange(u.shape[1]):\r\n out[:, j] = _argrank(u[:, j])\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
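A worked example of the argsort-inversion trick; ties are broken by position rather than averaged:

    import numpy as np

    x = np.array([3.2, -1.0, 7.5])
    ranks = np.empty(len(x), dtype=int)
    ranks[x.argsort()] = np.arange(len(x))
    print(ranks)  # [1 0 2]: -1.0 is the smallest, so it gets rank 0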
Reads a CSV file containing results. Produces a mapping of benchmark names to numerical results. | def read_results(file_name):
results = {}
with open(file_name, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in islice(spamreader, 1, None):
results[row[0]] = float(row[1])
return results | [
"def get_results(self):\n # read the csv report\n with open(self.aggr_all) as f:\n reader = csv.DictReader(f)\n results = dict()\n # built the benchmark\n for row in reader:\n # get needed information from row\n # TODO: Warum ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
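A self-contained round trip using an in-memory file; the column layout is assumed to be a header row followed by name,value pairs, with islice(reader, 1, None) skipping the header:

    import csv
    import io
    from itertools import islice

    f = io.StringIO("benchmark,seconds\nfib,2.5\nsort,0.8\n")
    reader = csv.reader(f, delimiter=',', quotechar='|')
    results = {row[0]: float(row[1]) for row in islice(reader, 1, None)}
    print(results)  # {'fib': 2.5, 'sort': 0.8}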
Aggregates results. Takes a baseline and a number of other measurements, and divides all measurements by the baseline on a per-benchmark basis. | def aggregate_results(baseline, *others):
def aggregate_benchmark(key, results):
if key in results and results[key] != 0.0:
return results[key] / baseline[key]
else:
return float('nan')
results = []
for key in sorted(baseline.keys()):
results.append((key, 1.... | [
"def _calculate_baseline(self):\n if self.data_counter % self.window_step == 0 and len(self.data_queue) == self.window_length:\n measurement = util.to_list(self.measurement_func(list(self.data_queue)))\n if self.baseline is None:\n self.baseline = [[feature] for feature i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
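A worked numeric example of the normalization, with invented values; each measurement is divided by the baseline entry for the same benchmark, so the baseline itself maps to 1.0:

    baseline = {"fib": 2.0, "sort": 4.0}
    other = {"fib": 1.0, "sort": 6.0}
    print({k: other[k] / baseline[k] for k in sorted(baseline)})
    # {'fib': 0.5, 'sort': 1.5}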
Aggregates result files for a particular category of benchmarks. | def aggregate_category(baseline_file, *other_files):
baseline = read_results(baseline_file)
others = [read_results(name) for name in other_files]
return aggregate_results(baseline, *others) | [
"def cat(ctx, files):\n\n gb = GoogleBenchmark()\n for file in files:\n gb += GoogleBenchmark(stream=file)\n click.echo(gb.json())",
"def summariseFiles(origfiles, aggfiles, outputfile):\n \n summaries = []\n for (of, af) in zip(origfiles, aggfiles): \n summer = Summariser(of... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compares this node's attribute with the entry's attribute | def compareAttributes(self, entry):
if self.attributeType == 'str':
# Must be a categorical attribute, so compare this node's category with that of the entry
return self.comparisonValue == entry.attributes[self.attribute]
else:
# Must be a numeric attribute
... | [
"def compare(self, other: Attribute) -> float:\n return int(self._equal(self.value == other.value))",
"def match(self, compared_attribute):\n for k, v in self.attribute.items():\n if not (k in compared_attribute and compared_attribute[k] == v):\n return False\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |