Columns:
  query: string, lengths 9 to 9.05k
  document: string, lengths 10 to 222k
  negatives: list, 19 to 20 items
  metadata: dict
Compute total loss value in the collections
def _total_loss(self, collections=None, name=None):
    if collections is None:
        collections = [GKeys.LOSSES]
    loss_vars = []
    for key in collections:
        loss_vars.extend(tf.get_collection(key))
    total_loss = tf.add_n(loss_vars, name=name)
    return total_loss
[ "def calc_loss_total(self):\n loss = self.calc_loss()\n return tf.reduce_sum(loss)", "def get_loss(self):\n return self.loss / self.cnt", "def loss(self):\n pass", "def sum_factors(self) -> float:\n return sum([x for x in self._loss_dict.values()])", "def get_total_loss(add_re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flatten tensor to shape [-1, size]
def _flatten(self, inputT, size):
    return tf.reshape(inputT, (-1, size))
[ "def flatten_(t: Tensor) -> Tensor:\n return reshape_(t, (-1,))", "def flatten(x_tensor):\n # TODO: Implement Function\n b, w, h, d = x_tensor.get_shape().as_list()\n img_size = w * h * d\n return tf.reshape(x_tensor, [-1, img_size])", "def flatten(x_tensor):\n flat_dimension = np.prod(x_tenso...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
String representation for Dice.
def __str__(self):
    return "Die1: %s\nDie2: %s" % (str(self.die1), str(self.die2))
[ "def __str__(self):\n return \"Die with {} sides. Result : {}\".format(self._sides, self._value)", "def dice_status_to_string(self):\r\n dice_status = \"Dice to play: \"\r\n\r\n if self.die1 != 0 and self.die2 != 0:\r\n dice_status += str(self.die1) + \" and \" + str(self.die2)\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines which festival is observed on the given date, taking rank into account. For equal rank, festivals named earlier in the "feste" list above take precedence.
def fest(d=None):
    if d is None:
        d = date.today()
    fest = None
    for f in FestManager.feste:
        if not f.isDate(d):
            continue
        if fest is None:
            fest = f
            continue
        if f.rang() > fest.rang():
            fest = f
    return fest
[ "def sorterdato( vegrefliste, valgtdato='' ): \r\n \r\n idag = datetime.datetime.now().strftime( '%Y-%m-%d') \r\n p = 'properties' # Shortcut \r\n baklengs_sortert = sorted(vegrefliste, reverse=True, key = lambda r: r[p]['fradato'] )\r\n resultat = []\r\n \r\n if not valgtdato: \r\n valg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the value of a register chosen by its index
def set_register(self, index, value):
    if index < 0 or index > 32:
        raise Exception('Register index out of range')
    self.register[index].set_value(str(value))
[ "def seti(a, b, c, register):\n\tregister[c] = a", "def seti(self, a, b, c):\n self.registers[c] = a", "def set(self, idx, value):\r\n self._update(idx, value)", "def set_value(self, idx, value):\n idx = (idx + self._start) % self._capacity\n self._set_idx(idx, value)", "def __se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the value of the program counter
def set_pc(self, value):
    self.program_counter.set_value(str(value))
[ "def set_counter(self, counter: int, value: int) -> None:\n check_counter(counter)\n self.write_long(27 + (counter - 1) * 2, value)\n return", "def _set_counter(cls, number: int):\n cls.__counter = number", "def PC_set(val):\r\n global PC\r\n \r\n #Sanity checking\r\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the value of the storage
def set_storage(self, value):
    self.storage.setPlainText(str(value))
[ "def set(self, value):\n self._storage.set(self._item, value)", "def storage_set(self, key, value):\n if not self._module:\n return\n self._storage_init()\n module_name = self._module.module_full_name\n return self._storage.storage_set(module_name, key, value)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills the symbol table with parsed labels and addresses
def set_symbols(self, symboltable: dict):
    for index in range(1, self.symbol_layout.rowCount()):
        self.symbol_layout.removeRow(index)
    font = QFont('Fira Code', 8, QFont.Medium)
    for entry in symboltable:
        symbol = QLineEdit()
        symbol.setReadOnly(True)
        ...
[ "def label_parse(self):\r\n # map labels\r\n for line in self._data:\r\n if len(line) > 0 and line[0] == '(':\r\n temp = line.split(')')[0][1:]\r\n self._symbol_table[temp] = self._next_mem_slot\r\n self._next_mem_slot -= 1\r\n self._n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn on the scene.
def run(self) -> None:
    self._hass.turn_on('scene.{0}'.format(self._args['scene']))
[ "def on_enable_scene(self):", "def turn_on(self, **kwargs):\n self._state = True\n self.update_ha_state()", "def TurnLightOn(self):\n self.isOn = True\n self.mainWin.settings_general_tip_of_the_day = 1\n self.setPixmap(self.lightswitchOnPixmap)\n self.repaint()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print header from a FITS file to either stdout or a file
def print_header(fitsfile, ext=0, ofileh=sys.stdout):
    hdr = fitsio.read_header(fitsfile, ext=ext)
    ofileh.write(f"{hdr}")
    ofileh.write("\n")
[ "def printheader(input,):\n from astropy.io import fits\n\n with fits.open(input) as hdulist:\n hdulist.info()\n for hdu in hdulist:\n print(repr(hdu.header))", "def print_header(name, texfile):\n texfile.write('\\n')\n texfile.write('%--------------------\\n')\n texfile.wr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creating fake profiles for given number of people using namedtuples
def init_profiles_using_namedtuple(no_profiles: int):
    profiles = []
    Profile = namedtuple('Profile', fake.profile().keys())
    for _ in range(no_profiles):
        profiles.append(Profile(**fake.profile()))
    return profiles
[ "def __call__(self, size, last_name_pool=None):\n if last_name_pool is None:\n last_name_pool = self.last_name_pool(size)\n for _step in range(size):\n gender = self.random_gender()\n yield {\n 'birthday': self.random_birthday(),\n 'gender...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function finds the oldest person, i.e. the entry with the minimum birthdate, calculates the duration since then, and returns the age in years.
def oldest_person_nt(all_profile_nt: namedtuple) -> float:
    """Param: all_profile_nt: Named tuple containing all profiles"""
    value = min(all_profile_nt, key=lambda v: v[-1])
    date_today = datetime.date.today()
    age = (date_today - value.birthdate).days
    return int(age / 365)
[ "def calc_min_residence_time(self):\r\n pass", "def get_oldest_person(person_list):\n return max(person_list, key=lambda x: x.age)", "def minStartTime(listAct):\n maxAct = max(listAct, key=lambda activity: activity.seq + activity.duration)\n return maxAct.seq + maxAct.duration", "def reserveti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function uses the mode function defined in the statistics library to find the most frequently occurring blood group in the list. The list is generated using a lambda function and passed to the mode function as a parameter. The code is then timed, and the result and time are sent back.
def max_bloodgroup_nt(all_profile_nt: namedtuple) -> tuple:
    """Param: all_profile_nt: Named tuple containing all profiles"""
    blood_group = mode(list(map(lambda v: v[5], all_profile_nt)))
    return blood_group
[ "def find_max_blood_group_dict(profile_list: 'list of Fake profiles (dictionaries)'):\n aplus_cnt = sum([profile_list[i]['blood_group'] == 'a+' for i in range(len(profile_list))])\n aminus_cnt = sum([profile_list[i]['blood_group'] == 'a-' for i in range(len(profile_list))])\n bplus_cnt = sum([profile_list[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a fake stock data set for an imaginary stock exchange covering the top 100 companies (name, symbol, open, high, close).
def stock_market(no_profiles: int) -> tuple:
    all_companies = []
    Stocks = namedtuple("Stocks", 'name symbol open high close company_weight')
    MkValue_ = random.uniform(1000, 50000, 100)
    wts_ = random.uniform(0, 1, 100)
    wts_ = wts_ / sum(wts_)
    for _ in range(100):
        name = fake.company()
        ...
[ "def test_market_stocks_get(self):\n pass", "def import_single_stock(ticker_symbol, date_min, date_max):\n stock_data_df = pdr.get_data_yahoo(ticker_symbol, start=date_min, end=date_max)\n stock_data_df['ticker_symbol'] = ticker_symbol\n stock_data_df.columns = ['high', 'low', 'open', 'close', 'vo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Explains the model with LimeExplainer
def explain_model_with_lime(
    model,
    data_to_explain=None,
    train_data=None,
    total_data=None,
    examples_to_explain: Union[int, float, list] = 0,
) -> "LimeExplainer":
    if total_data is None:
        train_x = train_data
        test_x = data_to_explain
        test_y = None
        ...
[ "def explainer(model, text):\r\n from lime.lime_text import LimeTextExplainer\r\n\r\n model = Explainer(model)\r\n\r\n explainer = LimeTextExplainer(\r\n split_expression=lambda x: x.split(),\r\n bow=False,\r\n class_names=[\"positive probability\"]\r\n )\r\n\r\n exp = explainer....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs non-max suppression on the given bboxes
def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
    # 49 x 6
    assert type(bboxes) == list
    # print(bboxes)
    bboxes = [box for box in bboxes if box[1] > threshold]
    bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
    bboxes_after_nms = []
    # print(bboxes)
    w...
[ "def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes = []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Outer product of state1 and state2
def ketbra(state1, state2):
    state1 = normalize(state1)
    state2 = normalize(state2)
    return np.outer(state1.conj(), state2)
[ "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def product(a, b):\n \n return a * b", "def tensor_product(self, other, prod_sys=None):\n prod_graph = nx.product.tensor_product(self, other)\n # not populating ?\n if prod_sys is None:\n if self.stat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Density matrix of an ensemble of quantum states
def density(ensembles):
    if len(ensembles.shape) < 2:
        return ketbra(ensembles)
    else:
        den_mat = ketbra(ensembles[0])
        for i in range(1, len(ensembles)):
            den_mat += ketbra(ensembles[i])
        den_mat /= len(ensembles)
        return den_mat
[ "def gen_density_matrix(states=None, dimensions=None):\n if states is None:\n tdim = np.prod(dimensions)\n dmtotal0 = np.eye(tdim) / tdim\n\n return dmtotal0\n\n dmtotal0 = np.eye(1, dtype=np.complex128)\n\n for i, s in enumerate(states):\n\n if not hasattr(s, \"__len__\"):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fidelity between state1 and state2; only valid when state1 is a pure state
def fidelity(state1, state2):
    if len(state1.shape) > 1:
        print("error: state1 must be a pure state.")
    state1 = normalize(state1)
    fid = 0
    if len(state2.shape) < 2:
        state2 = normalize(state2)
        fid = np.abs(braket(state1, state2))**2
    else:
        for i in rang...
[ "def correct_state(self, state, diff=True):\n return state", "def test_update_state2(self):\n pass", "def is_state_equivalent(self, state1, state2):\n return state1 == state2", "def requested_state():", "def test_update_state1(self):\n pass", "def checkState(self):", "def tes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that retrieves the primary keys and the waitFinish attribute of test cases. The query targets the TestCaseRunConfiguration table and returns rows ordered by sequenceNumber for the given run configuration.
def get_run_config_test_cases(self, run_config_id: int) -> tuple:
    sql_cmd = "SELECT idTestCaseConfig, idTestCase, waitFinish FROM TestCaseRunConfiguration WHERE idRunConfig = ?" \
              " ORDER BY sequenceNumber"
    self._cursor.execute(sql_cmd, [run_config_id])
    result: tuple = self._cu...
[ "def test_run_started(self):", "def test_consumer_logs_task_started_nth_rety(self):\n due_at = now() - timedelta(milliseconds=100)\n task = create_task(function_name=\"tests.fixtures.failing\", due_at=due_at)\n\n consumer = Consumer()\n consumer.execute_tasks()\n\n with self.ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that retrieves the list of test ids in a test case. The query targets the TestCaseTestRelation table by test case id, ordered by sequenceNumber.
def get_test_case_tests(self, test_case_id: int) -> tuple:
    sql_cmd = "SELECT idTest FROM TestCaseTestRelation WHERE idTestCase = ? ORDER BY sequenceNumber"
    self._cursor.execute(sql_cmd, [test_case_id])
    result: tuple = self._cursor.fetchall()
    assert result, "[0002] No tests have been added ...
[ "def test_lists_id_get(self):\n pass", "def getTestsIds():\n with driver.session() as s:\n ids = s.write_transaction(getTestsId)\n\n tIds = []\n for idEl in ids:\n tIds.append(idEl[\"ID(t)\"])\n\n return tIds", "def get_list_ids():", "def test_get_batch_by_id(self):\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that retrieves the list of configurations for a test. The query targets the TestRunConfiguration table, so the configuration id (input data) for each test is determined via sequenceNumber.
def get_tests_configs_info(self, test_case_run_config_id: int) -> tuple:
    sql_cmd = "SELECT idTestConfig, threadsCount, waitTime FROM TestRunConfiguration WHERE idTestCaseConfig = ? " \
              "ORDER BY sequenceNumber"
    self._cursor.execute(sql_cmd, [test_case_run_config_id])
    result: tu...
[ "def get_seeds(self):\n seed_list = []\n with open('config.txt','r') as f:\n for lines in f.readlines():\n seed_list.append((lines.split(\":\")[0],int(lines.split(\":\")[1])))\n return seed_list", "def teststep_config(self):\n teststep_key_names = self.config[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that retrieves the saved configuration for a test (json|input_data). The query targets the TestConfiguration table and fetches the value of the inputData attribute.
def get_test_config(self, test_id: int, config_id: int) -> dict:
    sql_cmd = "SELECT inputData FROM TestConfiguration WHERE idTest = ? AND idTestConfig = ?"
    self._cursor.execute(sql_cmd, [test_id, config_id])
    result: tuple = self._cursor.fetchall()
    assert result, "[0004] The specified configur...
[ "def config_data():\n return {CONF_USERNAME: \"fake\", CONF_PASSWORD: \"user\"}", "def test_get_configuration_start_form_data(self):\n pass", "def read_input_file():\n \n global input\n \n config = ConfigParser.RawConfigParser()\n config.read(os.path.join(os.getcwd(), 'INPUT.cfg'))\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes NXDOMAIN records to Redis. Scheduled with RedisHandler.submit().
def nx_to_redis(self, backlog_timer, client_address, name):
    if self.stop:
        return
    if PRINT_COROUTINE_ENTRY_EXIT:
        PRINT_COROUTINE_ENTRY_EXIT("START nx_to_redis")
    if DNS_STATS:
        timer = self.answer_to_redis_stats.start_timer()
    self.redis_executor(self.nx...
[ "def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()", "def _store_in_redis(items, redis_key):\n r = Redis(db=9)\n # store results in a list\n try:\n for item in items:\n r.push(redis_ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Analyze and post to the ShoDoHFlo redis database.
def post_to_redis(self, message):
    if self.message_type and message.field('type')[1] != self.message_type:
        if self.performance_hint:
            logging.warn('PERFORMANCE HINT: Change your Dnstap config to restrict it to client response only.')
            self.performance_hint = Fal...
[ "def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()", "def submit(id, host):", "def app_index_job(cls):\n import time\n s = time.time()\n print('init--redis')\n news = json.dumps(DB....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
setParameters(IZParameters) -> void. Sets the IZParameters, normally during creation of menu items.
def setParameters(self, izParameters):  #$NON-NLS-1$
    pass
[ "def updateParameters(self, parameters):\r\n super(Tool, self).updateParameters(parameters)", "def set_parameters(self, params):\n self.kp = params.pgain", "def addParameters(self, parameters, is_default = False):\r\n if is_default:\r\n self.default_parameters = parameters\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NDVI with wrong bands
def _test_ndvi_incorrect_bands(self):
    scene = Landsat8Scene(self.filenames)
    self.assertEquals(scene.band_numbers, 8)
    try:
        scene2.ndvi()
    except SatProcessError as e:
        self.assertEquals(e.message, 'nir band is not provided')
    scene2 = scene.select(['nir', 'b...
[ "def test_ndvi():\n # Create 4-band image simulating RGBN as needed for NDVI\n test_image, _ = FakeGeoImage(\n 300, 150, 4, \"uint16\", out_dir=Path(\"/tmp\"), crs=4326, nodata=0, nodata_fill=3, cog = False\n ).create(seed=42)\n\n ndvi_image = ndvi(test_image)\n\n with rio.open(str(ndv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the patients that are currently in the Intensive Care.
def get_patients_in_ic(self):
    query = "SELECT * FROM patients WHERE datetime_discharge IS NULL"
    return self.mysql_obj.fetch_rows(query)
[ "def get_patients(self):\r\n return self.__patients_list", "def get_all_incidents():\n allIncidents = Incident.get_all()\n #allCops = get_all_cops()\n incidents = []\n for i in allIncidents:\n if(\n# (i['operations_center']['id'] in allCops) and\n#conf\n (i['operati...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all signal values for patient.
def get_signal_values_for_patient(self, patient_id):
    query = \
        """
        SELECT s.name, psv.value, psv.time
        FROM patient_signal_values psv
        INNER JOIN signals s ON psv.signal_id = s.id
        WHERE patient_id = %(patient_id)s
        """
    ...
[ "def get_raw_signals(self):\n signals, fields = wfdb.rdsamp(self.patient_number, pb_dir='mitdb', warn_empty=True)\n logging.info(\"Patient {} additional info: {}\".format(self.patient_number, fields))\n return signals, fields", "def get_signal_list(td):\n\n sig_list = td.getSignalList()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes diag(v1) dot M dot diag(v2). Returns an np.ndarray with the same dimensions as M.
def v1Mv2(v1, M, v2):
    return v1[:, None] * M * v2[None, :]
[ "def vdot(m1, m2):\n\n err_code = ct.c_int(0)\n res = _cudanet.vdot(m1.p_mat, m2.p_mat, ct.byref(err_code))\n\n if err_code:\n raise generate_exception(err_code.value)\n\n return res", "def vdot(m1, m2):\n\n err_code = ct.c_int(0)\n res = _eigenmat.vdot(m1.p_mat, m2.p_mat, ct.byref(err_code))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return windows of indices into the flattened data. data[index_matrix[i]] returns the flattened window around the ith element.
def create_index_matrix(data_shape, window_shape):
    n_data = np.prod(data_shape)
    n_window = np.prod(window_shape)
    box = np.indices(window_shape)
    index_matrix = np.zeros((n_data, n_window), dtype=np.int32)
    shifts = np.unravel_index(np.arange(n_data), data_shape)
    offset = (np.array(window_shape)-1)...
[ "def extract_windows(data, size, lap=0):\n values = data.values\n max_size = values.size\n offset = max_size % (size + lap)\n cor_size = max_size - offset\n col = data.index.values\n df = pd.DataFrame()\n for j in range(0, int(cor_size / size) + 1):\n for i in range(0, data.shape[0]):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate tensorflow op graph building function with name_scope. Name defaults to function name.
def scope_op(name=None):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            with tf.name_scope(name or function.__name__):
                return function(*args, **kwargs)
        return wrapper
    return decorator
[ "def add_name_scope(text):\n def decorator(func):\n def wrapper(*args, **kwargs):\n with tf.name_scope(text):\n return func(*args, **kwargs)\n return wrapper\n return decorator", "def nameop(op, name):\n op = tf.identity(op, name=name)\n return op", "def scope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather windows of tensor around centers. Uses wrapped padding.
def gather_windows(x, centers, system_shape, window_shape):
    window_size = np.prod(window_shape)
    batch_size = tf.shape(x)[0]
    index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))
    window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \
        tf.ones(window_size, dtype=tf.i...
[ "def update_windows(x, centers, updates, mask, system_shape, window_shape):\n window_size = np.prod(window_shape)\n batch_size = tf.shape(x)[0]\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \\\n tf.on...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update windows around centers with updates at rows where mask is True.
def update_windows(x, centers, updates, mask, system_shape, window_shape):
    window_size = np.prod(window_shape)
    batch_size = tf.shape(x)[0]
    index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))
    window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \
        tf.ones(window_s...
[ "def update_mask(self, indices):\n\n indices = indices.view(self.batch_size, -1)\n updated_mask = torch.zeros_like(self.mask.squeeze(-1)).scatter_(1, indices, 1)\n\n return updated_mask.unsqueeze(-1)", "def update_mask(self, new_atom):\n if self.Wf is not None: \n # convert ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather all windows of tensor.
def all_windows(x, system_shape, window_shape):
    index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))
    return tf.transpose(
        tf.gather_nd(tf.transpose(x), tf.expand_dims(index_matrix, 2)),
        [2, 0, 1])
[ "def gather_windows(x, centers, system_shape, window_shape):\n window_size = np.prod(window_shape)\n batch_size = tf.shape(x)[0]\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \\\n tf.ones(window_size,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize public and private key variables.
def __init__(self):
    self.public_key = None
    self._private_key = None
[ "def __init__(self):\n self._keypair = RSA.generate(2048)\n self.public_key = self._keypair.publickey().exportKey()", "def __init__(self, curve=None, private_key=None, public_key=None):\n self.curve = curve\n self.private_key = None\n self.public_key = None\n if private_k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypt 'message' with a public key and return its encryption as a list of integers. If no key is provided, use the 'public_key' attribute to encrypt the message.
def encrypt(self, message, key=None):
    # Check validity of public key
    if self.public_key is None:
        raise Exception("invalid public key!")
    elif key is None:
        e = self.public_key[0]
        n = self.public_key[1]
    else:
        e = key[0]
        n ...
[ "def encrypt(self, message, key=None):\n if key is None:\n key = self.public_key\n encrypter = RSA.importKey(key)\n return encrypter.encrypt(message, 2048)", "def encrypt(message):\n setup()\n\n # Convert message to integer representation\n m = ''\n for letter in messag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrypt 'message' with the private key and return its decryption as a single string. You may assume that the format of 'message' is the same as the output of the encrypt() function.
def decrypt(self, message):
    # check validity of _private_key
    if self._private_key is None:
        raise Exception("invalid private key")
    output = ""
    d = self._private_key[0]
    n = self._private_key[1]
    for i in xrange(len(message)):
        m = pow(message[i...
[ "def decrypt(private_key, msg):\n return private_key.decrypt(msg)", "def decrypt_message(encrypted_message):", "def decrypt(self, message) -> str:\n if self._enabled:\n if self._method == 'caesar':\n return self._caesarcipherdecrypt(message)\n elif self._method == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use Fermat's test for primality to see if 'n' is probably prime. Run the test at most five times, using integers randomly chosen from [2, n-1] as possible witnesses. If a witness number is found, return the number of tries it took to find the witness. If no witness number is found after five tries, return 0.
def is_prime(n, number_of_tests=5):
    passes = 0
    prime = True  # assume prime
    for i in xrange(number_of_tests):
        passes += 1
        random_int = random.randint(2, n-1)
        test = pow(random_int, n-1, n)
        if test != 1:
            prime = False
            break
    if prime:
        return 0
    ...
[ "def Ballie_PSW_test(n, max_trivial_trials=100):\n for i in range(max_trivial_trials):\n if primes[i] == n:\n return True\n if n % primes[i] == 0:\n return False\n if primes[i] ** 2 >= n:\n return True\n if not fermat_strong_test(n, 2):\n return Fal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the _keypair and public_key attributes.
def __init__(self):
    self._keypair = RSA.generate(2048)
    self.public_key = self._keypair.publickey().exportKey()
[ "def __init__(self):\n self.public_key = None\n self._private_key = None", "def initialize(self):\n super(self.__class__, self).initialize()\n\n try:\n self.__keypair = nova_utils.get_keypair_by_name(\n self._nova, self.keypair_settings.name)\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypt 'message' with a public key and return its encryption. If no key is provided, use the '_keypair' attribute to encrypt 'message'.
def encrypt(self, message, key=None):
    if key is None:
        key = self.public_key
    encrypter = RSA.importKey(key)
    return encrypter.encrypt(message, 2048)
[ "def encrypt(public_key, message):\n return public_key.encrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def encrypt(public_key, msg, precision=None):\n return publi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends an XGRequest to the host and parses output into a XGResponse object.
def send_request(self, request, strip=None, retry=True):
    data = request.to_xml()
    if self.debug:
        self.log('sending:\n{0}'.format(data))
    try:
        resp = self._handle.open(self.request_url, data)
        resp_str = resp.read()
        if self.debug:
            sel...
[ "def _xml_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response", "def do_external_request(self, cmd, extra_payload):\r\n xmlstr = etree.tostring(self.xml, pretty_print=True)\r\n payload = {\r\n 'xml': xmlstr,\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a "flat" list of XGNode objects based on node_names. If you wish to perform some iteration over a representational hierarchy, use get_node_tree() instead. This takes similar arguments to get_node_values() but returns all the node information received from the gateway in terms of XGNode objects.
def get_nodes(self, node_names, nostate=False, noconfig=False):
    return self._get_nodes(node_names, nostate, noconfig, flat=True)
[ "def get_nodes(self, names):\n nodes = []\n for name in names:\n node = self.get_node(name, prevent_error=True)\n if node == None:\n if verbose:\n print('Warning: could not find a TreeNode named {}.'.format(name))\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a 'set' using the nodes specified and returns the result.
def perform_set(self, nodes=[]):
    # Input validation
    try:
        # Works for XGNodeDict input
        set_nodes = nodes.get_updates()
    except (AttributeError, TypeError):
        # Assume list instead
        set_nodes = nodes
    if not isinstance(set_nodes, list):
        ...
[ "def make_set(node):\n node.parent = node\n node.rank = 0", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets nodes per dict with node name -> value mappings.
def set_nodes_values(self, node_dict):
    # Requires nodes to have type defined in lookup array
    raise Exception("Not yet implemented.")
[ "def set_nodes(self):\n for name, cols in self._nodes.items():\n data = self.get_node(cols)\n setattr(self, name, data)", "def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)", "def add_pro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the relative frequency of a category, and the words linked to categories
def getAll(text):
    URIs = getURIs(text)
    categories = getCategories(URIs[0], URIs[1])
    return categoryFrequency(categories[0]), categories[1]
[ "def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)", "def liwc(text,output='rel',lang='nl'):\n\n# decide on relative or absolute freque...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns searcher with boosts applied
def apply_boosts(searcher):
    return searcher.boost(
        question_title=4.0,
        question_content=3.0,
        question_answer_content=3.0,
        post_title=2.0,
        post_content=1.0,
        document_title=6.0,
        document_content=1.0,
        document_keywords=8.0,
        document_summary=2.0,
        ...
[ "def search_boost(self, search_boost):\n\n self._search_boost = search_boost", "def do_search_weight(self, args, carryover=False):\n self._perform_search(args, self.search.byWeight, converter=self.weight_converter, carryover=carryover)", "def search_space() -> Mapping[str, Any]:\n\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write given timeseries to Cloud Monitoring.
def write_time_series(host_project_id, series):
    client = monitoring_v3.MetricServiceClient()
    project_id = 'projects/%s' % host_project_id
    try:
        client.create_time_series(request={
            'name': project_id,
            'time_series': [series]
        })
        return True
    except exceptions....
[ "def test_time_series_file_write(self):\n # Query and invoke write method\n self._query_n_write_multiple(TimeSeriesFile, 'ohl')\n\n # Test\n self._compare_files(self.original, self.name, 'ohl')", "def insert_timeseries(pool, timeseries, tms_id, end_date=None):\n new_timeseries = []\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract timeseries data from MQL query response.
def _extract_mql_timeseries_data(response):
    lkeys = response['timeSeriesDescriptor'].get('labelDescriptors', [])
    # (fixme): Is there a better way to fetch and extract this data?
    for result in response.get('timeSeriesData', []):
        data = {}
        lvalues = result.get('labelValues', [])
        data =...
[ "def query_timeseries_mql(project_id, mql):\n project_name = _PROJECTS % project_id\n client = gcp.monitoring_service()\n # pylint:disable=no-member\n request = client.projects().timeSeries().query(name=project_name,\n body={'query': mql})\n # pylint:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query timeseries for a project using mql.
def query_timeseries_mql(project_id, mql):
    project_name = _PROJECTS % project_id
    client = gcp.monitoring_service()
    # pylint:disable=no-member
    request = client.projects().timeSeries().query(name=project_name,
                                                 body={'query': mql})
    # pylint:enable=no-m...
[ "def query_history(self, t0, t1):\n t0 = pd.to_datetime(t0)\n t1 = pd.to_datetime(t1)\n t0 = t0.strftime(\"%Y-%m-%d %H:%M:%S\")\n t1 = t1.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n query_text = (\"select * from `region-us`.INFORMATION_SCHEMA.JOBS_BY_PROJECT \" +\n \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response body (as a dict).
def get_received_txn_response(self, transaction_id, origin):
    return self.db.runInteraction(
        "get_received_txn_response",
        self._get_received_txn_response,
        transaction_id,
        origin,
    )
[ "def set_received_txn_response(self, transaction_id, origin, code, response_dict):\n\n return self.db.simple_insert(\n table=\"received_transactions\",\n values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Persist the response we returned for an incoming transaction; the same response should be returned for subsequent transactions with the same transaction_id and origin.
def set_received_txn_response(self, transaction_id, origin, code, response_dict):
    return self.db.simple_insert(
        table="received_transactions",
        values={
            "transaction_id": transaction_id,
            "origin": origin,
            "response_code": code,
            ...
[ "def send_response(self, request):\n request.response.received_hash = request.sent_hash\n key = \"response_\" + request.name\n request.relation.data[self.app][key] = request.response.dumps()\n self.state.known_requests[request.id] = request.hash\n if not self.new_requests:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current retry timings (if any) for a given destination.
def get_destination_retry_timings(self, destination):
    result = self._destination_retry_cache.get(destination, SENTINEL)
    if result is not SENTINEL:
        return result
    result = yield self.db.runInteraction(
        "get_destination_retry_timings",
        self._get_destination_ret...
[ "def set_destination_retry_timings(\n self, destination, failure_ts, retry_last_ts, retry_interval\n ):\n\n self._destination_retry_cache.pop(destination, None)\n return self.db.runInteraction(\n \"set_destination_retry_timings\",\n self._set_destination_retry_timings,\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the current retry timings for a given destination. Both timings should be zero if retrying is no longer occurring.
def set_destination_retry_timings(
    self, destination, failure_ts, retry_last_ts, retry_interval
):
    self._destination_retry_cache.pop(destination, None)
    return self.db.runInteraction(
        "set_destination_retry_timings",
        self._set_destination_retry_timings,
        de...
[ "def get_destination_retry_timings(self, destination):\n\n result = self._destination_retry_cache.get(destination, SENTINEL)\n if result is not SENTINEL:\n return result\n\n result = yield self.db.runInteraction(\n \"get_destination_retry_timings\",\n self._get_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the smallest string of at most max_len characters accepted by this NFA that is greater than sequence.
def next_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:
    max_hi = num_seqs_with_max_len(len(self.alphabet), max_len)
    desired_num_accepted = self.num_accepts_ge(max_len, sequence) - self.accepts(sequence)
    lo = seq_to_num(sequence, self.inverse_alphabet, max_len) + 1
    ...
[ "def h_max_substr_finder(self, length=3):\n results = self.substr_finder(length)\n maximum = max(results.values())\n return ' '.join(map(lambda (key, _): key, filter(lambda (_, v): v == maximum, results.iteritems())))", "def selective_len(str, max):\r\n res = 0\r\n for c in str:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns counts of the sequences accepted by this NFA: the number of sequences with length at most max_len that are lexicographically less than, equal to, and greater than bound.
def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:
    lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)
    lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)
    eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)
    eq2: Dict[Fro...
[ "def num_accepts_ge(self, max_len: int, bound: Sequence[Text] = ()) -> int:\n _, num_accepted_eq, num_accepted_gt = self.num_accepts(max_len, bound)\n return num_accepted_eq + num_accepted_gt", "def max_length(sequences):\t\n\tml=0\n\tfor ind_seq in sequences:\n\t\tif ind_seq.get_LENGTH() >= ml:\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of sequences >= bound with len() <= max_len accepted by this NFA.
def num_accepts_ge(self, max_len: int, bound: Sequence[Text] = ()) -> int:
    _, num_accepted_eq, num_accepted_gt = self.num_accepts(max_len, bound)
    return num_accepted_eq + num_accepted_gt
[ "def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:\n lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq2:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string representation of this NFA as a GraphViz .dot file.
def build_dot_str(self) -> Text:
    s = []
    s.append("digraph {")
    for node in self.nodes:
        label = str(node)
        if node in self.start_nodes:
            label += "S"
        if node in self.accept_nodes:
            label += "A"
        s.append(f' "{node}" [labe...
[ "def __str__(self):\n graph = self.create_dot()\n return graph.string()", "def make_dot(self, fn=None):\n if self.is_directed():\n s = 'digraph G {\\n'\n for v in self.__g.vs():\n children = v.successors()\n if len(children) == 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a sequence that partitions the space of accepted strings. Returns a sequence, results, that is not necessarily accepted by nfa and that partitions the sequences of length max_len or less that nfa accepts, lexicographically between lo and hi, such that target_ratio plus or minus tolerance_ratio of the sequences are lexicog...
def find_partition_seq(
    nfa: NFA,
    max_len: int,
    target_ratio=fractions.Fraction(1, 2),
    low: Iterable[Text] = (),
    high: Optional[Iterable[Text]] = None,
    tolerance_ratio: float = 0.0,
) -> Tuple[Text, ...]:
    max_letter = max(nfa.alphabet)
    lo: int = seq_to_num(low, nfa.inverse_alphabet, max...
[ "def next_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n max_hi = num_seqs_with_max_len(len(self.alphabet), max_len)\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) - self.accepts(sequence)\n lo = seq_to_num(sequence, self.inverse_alphabet, max_len) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create should fail with a StratisCliNameConflictError trying to create new pool with the same devices and the same name as previous.
def test_create_same_devices(self):
    command_line = self._MENU + [self._POOLNAME] + self.devices
    self.check_error(StratisCliNameConflictError, command_line, _ERROR)
[ "def testCreate(self):\n pools1 = get_managed_objects(self._proxy).pools()\n\n (_, rc, _) = checked_call(\n Manager.CreatePool(\n self._proxy,\n name=self._POOLNAME,\n redundancy=0,\n force=False,\n devices=_DEVICE_STRATEGY.exa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create should fail with a StratisCliNameConflictError trying to create new pool with different devices and the same name as previous.
def test_create_different_devices(self):
    command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
    self.check_error(StratisCliNameConflictError, command_line, _ERROR)
[ "def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME] + self.devices\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)", "def testCreate(self):\n pools1 = get_managed_objects(self._proxy).pools()\n\n (_, rc, _) = checked_call(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that creating two pools with different names and the same devices raises a StratisCliInUseSameTierError exception.
def test_create_same_devices(self):
    command_line = self._MENU + [self._POOLNAME_2] + self._DEVICES
    self.check_error(StratisCliInUseSameTierError, command_line, _ERROR)
[ "def testCreate(self):\n pools1 = pools(ObjectManager.Methods.GetManagedObjects(self._proxy, {}))\n\n (_, rc, _) = Manager.Methods.CreatePool(\n self._proxy,\n {\n 'name': self._POOLNAME,\n 'redundancy': (True, 0),\n 'force': False,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that creating with tpm2 does something reasonable.
def test_create_tpm(self):
    command_line = self._MENU + [self._POOLNAME] + self._DEVICES + ["--clevis=tpm2"]
    TEST_RUNNER(command_line)
[ "def test_create_system_entire(self):\n pass", "def test_create_tam_security_advisory(self):\n pass", "def test_create_unexpected_problem(self):\n pass", "def test_create(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether an exception is raised when a mandatory attribute does not belong to the product model definition.
def test_missing_mandatory_attributes():
    model_definition = {
        'source': {'type': 'list', 'required': True, 'persisted': True},
        'resources.title': {'type': 'text', 'required': True...
[ "def test_missing_attribute_error(self):\n with pytest.raises(MissingAttributeError) as excinfo:\n raise MissingAttributeError('preferredKey', 'bibliographyLink')\n assert 'Error: required attribute preferredKey of bibliographyLink is missing.' in str(excinfo.value)", "def _clean_standalo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether the factory successfully validates a model when a non-required attribute is missing from the product model.
def test_alright_when_non_required_field_is_missing():
    model_definition = {
        'language': {'type': 'fixed', 'required': True, 'persisted': True},
        'source': {'type': 'list', 'required': ...
[ "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'requi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether the factory successfully validates a model when a required attribute is missing from the product model, but a default value is given.
def test_alright_when_required_field_is_missing_but_default_is_given():
    model_definition = {
        'language': {'type': 'fixed', 'required': True, 'persisted': True, 'default': 'portuguese'},
        ...
[ "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'requi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the calculation of the similarity of two products based on a 'numeric' attribute.
def test_similarity_numeric():
    similarity = pm.compute_similarity_for_numeric(900, 800)
    nose.tools.ok_(abs(similarity - 8/9) < tests.FLOAT_DELTA, "Wrong numeric similarity")
[ "def calculate_numeric_similarity(prop1, prop2, max_value):\n\n return (\n prop1 and\n prop2 and\n max_value and\n (1.0 - abs(prop1 - prop2 - 0.0 ) / max_value) or 0.0\n )", "def similarity(self, word1, word2):\n\n\traise NotImplementedError('implement it!')", "def test_tensor_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the calculation of the similarity of two products based on a 'date' attribute.
def test_similarity_date():
    date1 = dt.datetime(2000, 11, 24, 10, 0)
    date2 = dt.datetime(2000, 11, 26, 10, 0)
    similarity = pm.compute_similarity_for_date(date1, date2, halflife=2)
    nose.tools.ok_(abs(similarity - 0.5) < tests.FLOAT_DELTA, "Wrong date similarity")
[ "def compare_prices_from_date(date1: str, date2: str, search=\"\") -> list:\n\n cols = []\n\n first = Price.objects.filter(timestamp__value=date1).select_related(\n \"card\").order_by(\"card__card_id\")\n second = Price.objects.filter(timestamp__value=date2).only(\n \"card_id\",\n \"va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the calculation of the similarity of two products based on a 'fixed' attribute.
def test_similarity_fixed():
    similarity = pm.compute_similarity_for_fixed("Rio de Janeiro", "São Paulo")
    nose.tools.eq_(similarity, 0, "Wrong fixed similarity")
    similarity = pm.compute_similarity_for_fixed("Rio de Janeiro", "Rio de Janeiro")
    nose.tools.eq_(similarity, 1, "Wrong fixed similarity")
[ "def similarity(pair: Tuple[Text, Text]) -> float:\n (a, b) = pair\n missing = (\n True\n if any(symbol not in Metrics.realine.feature_matrix for symbol in pair)\n else False\n )\n return 0.0 if missing else 1 - Metrics.realine.delta(a, b)", "def test_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the calculation of the similarity of two products based on a 'list' attribute.
def test_similarity_list():
    list1 = ["a", "b", "c"]
    list2 = ["b", "c", "d", "e"]
    similarity = pm.compute_similarity_for_list(list1, list2)
    nose.tools.ok_(abs(similarity - 2/3) < tests.FLOAT_DELTA, "Wrong list similarity")
    similarity = pm.compute_similarity_for_list(list2, list1)
    # intentionally as...
[ "def testSimilarityBatch(self):\n\n results = [r[0][0] for r in self.similarity([\"feel good story\", \"climate change\"], self.data)]\n self.assertEqual(results, [4, 1])", "def __testListingMatched(self, products, listing, should_match=True):\n \n if isinstance(products, str):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests conversion from a dict to a ProductModel instance.
def test_conversion_from_dict():
    model_definition = {
        'language': {'type': 'fixed', 'default': 'english'},
        'a': {'type': 'fixed', 'persisted': True},
        'b.c': {'type': 'fixed', 'persisted': True},
        'b.d.e': {'type': 'text', 'persisted': True},
        'b.d.f': {'type': 'numeric', 'persi...
[ "def test_create_from_dict(self):\n b1 = BaseModel()\n b1.name = \"Holberton\"\n b1.my_number = 89\n my_model_json = b1.to_dict()\n b2 = BaseModel(**my_model_json)\n self.assertEqual(b1.my_number, b2.my_number)\n self.assertEqual(b1.id, b2.id)\n self.assertEqu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return category details url
def category_details_url(id):
    return reverse('category:category-detail', args=[id])
[ "def get_url(self, category_id=None, category_details=None):\n return structures_module.categories.get_url(self.khoros_object, category_id, category_details)", "def get_absolute_url(self):\n return reverse('category-detail', args=[str(self.categoryId)])", "def get_category(self, uuid):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and return a sample category
def sample_category(name='place'):
    return Category.objects.create(name=name)
[ "def test_create_category(self):\n pass", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def test_0005_create_category( self ):\n self.create_category( name='Test 0010 Repository With Tool De...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test viewing category details
def test_get_category_details(self):
    category = sample_category()
    url = category_details_url(category.id)
    res = self.client.get(url)
    serializer = CategorySerializer(category)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertEqual(res.data, serializer.data)
[ "def test_get_category(self):\n response = client.get(self.category_url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['name'], self.category.name)", "def test_view_categories(self):\n response = self.client.get('/api/v2/categories/')\n self.asse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test creating a category with invalid details
def test_create_category_with_invalid_details_fails(self):
    res = self.client.post(CATEGORY_URL, {})
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(
        res.data['errors']['name'][0],
        'This field is required.')
[ "def test_create_category_invalid(self):\n payload = {'name': ''}\n res = self.client.post(CATEGORIES_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_category(self):\n pass", "def test_error_category(self):\n url = reverse('cat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that creating a category with an existing name fails
def test_create_category_with_existing_name(self):
    sample_category()
    res = self.client.post(CATEGORY_URL, {"name": "place"})
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(
        res.data['errors']['name'][0],
        'This field must be unique....
[ "def test_create_category(self):\n pass", "def test_add_same_category(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[0]),\n content_type='application/json',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test updating a category to existing name fails
def test_update_category_to_existing_name(self):
    sample_category()
    category = sample_category(name='House')
    url = category_details_url(category.id)
    res = self.client.put(url, {"name": "place"})
    category.refresh_from_db()
    self.assertEqual(res.status_code, status.HTTP_400_...
[ "def test_update_category(self):\n pass", "def test_put_category(self):\n response = client.put(self.category_url, self.category_data_update)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['name'], self.category_data_update['name'])", "def test_categor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the images into a numpy array.
def _extract_images(image_paths):
    num_images = len(image_paths)
    data = np.zeros((num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS))
    for i in range(num_images):
        image_path = image_paths[i]
        print('Extracting images from: ', image_path)
        image = imageio.imread(image_path)
        dat...
[ "def extract_images(filename,imageName):\n fData = h5py.File(filename,'r')\n inData = fData.get(imageName) \n \n \n num_images = inData.shape[0]\n rows = inData.shape[1]\n cols = inData.shape[2]\n print(num_images, rows, cols)\n data = numpy.array(inData)\n \n data = data.reshape(num_images, rows,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Aggregate metric value across towers.
def _aggregate_across_towers(metrics_collections, metric_value_fn, *args):
    def fn(distribution, *a):
        """Call `metric_value_fn` in the correct control flow context."""
        if hasattr(distribution, '_outer_control_flow_context'):
            # If there was an outer context captured before this method was called,
            # then we enter that context to create the metric value op. If the
            # ...
[ "def _aggregate_across_towers(metrics_collections, metric_value_fn, *args):\n def fn(distribution, *a):\n \"\"\"Call `metric_value_fn` in the correct control flow context.\"\"\"\n if hasattr(distribution, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call `metric_value_fn` in the correct control flow context.
def fn(distribution, *a):
    if hasattr(distribution, '_outer_control_flow_context'):
        # If there was an outer context captured before this method was called,
        # then we enter that context to create the metric value op. If the
        # captured context is `None`, ops.control_dependencies(None) gives the ...
[ "def fn(distribution, *a):\n if hasattr(distribution, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was called,\n # then we enter that context to create the metric value op. If the\n # caputred context is `None`, ops.control_dep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes to count the number of null lists in a specific feature path. When required_paths is also passed, rows which are null for all of the required paths will not be counted as missing.
def __init__(self,
             path: types.FeaturePath,
             required_paths: Optional[Iterable[types.FeaturePath]] = None):
    self._path = path
    if required_paths:
        self._required_paths = tuple(sorted(required_paths))
    else:
        self._required_paths = None
[ "def calc_null(self):\n null = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x].lower() == 'null':\n null += 1\n print('Total number of null fields: ' + str(null))\n results.append('Total n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the sum over all dimensions except the first (batch dimension) and the last n_dims. This function ignores the first dimension and does not aggregate over the last n_dims dimensions.
def sum_over_all_but_batch_and_last_n(
    tensor: torch.Tensor, n_dims: int
) -> torch.Tensor:
    if tensor.dim() == n_dims + 1:
        return tensor
    else:
        dims = list(range(1, tensor.dim() - n_dims))
        return tensor.sum(dim=dims)
[ "def _sum_remaining_dims(data: sc.DataArray, dim: str) -> sc.DataArray:\n to_be_summed = set(data.dims) - set([dim])\n summed = data\n for dim_ in to_be_summed:\n summed = sc.sum(summed, dim_)\n return summed", "def sum(self, dim=None, keepdim=False):\n return array_funcs.sum(self, dim, keep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an ndb.Model entity that the urlsafe key points to. Checks that the type of entity returned is of the correct kind. Raises an error if the key string is malformed or the entity is of the incorrect kind.
def get_by_urlsafe(urlsafe, model):
    try:
        key = ndb.Key(urlsafe=urlsafe)
    except TypeError:
        raise endpoints.BadRequestException('Invalid Key')
    except Exception as e:
        if e.__class__.__name__ == 'ProtocolBufferDecodeError':
            raise endpoints.BadRequestException('Invalid Key')
        ...
[ "def get_by_urlsafe(urlsafe, model):\n try:\n key = ndb.Key(urlsafe=urlsafe)\n print \"Key allocated to key\"\n except TypeError:\n raise endpoints.BadRequestException('Invalid Key')\n print \"Type error\"\n except Exception, e:\n if e.__class__.__name__ == 'ProtocolBuffe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a Player (ndb.Model) entity for a given username and game. First verifies that the username and game entity are valid, raising an error if not. Then returns the Player entity for the given user in the game. Raises an error if a Player is not found.
def get_player_by_game(username, game): # check to make sure User exists if not check_user_exists(username): raise endpoints.NotFoundException( '{} does not exist!'.format(username)) # check to see if game is a valid Game entity if not isinstance(game, Game): raise endpoint...
[ "def get_player(username):\n sql = \"SELECT * FROM players WHERE `username`='{0}';\".format(username)\n return get(sql, False)", "def getPlayer(self, username=''):\n return self.get(username, self.audience)", "def get_user_game(self, user_id):\n if user_id in self.game_players.keys():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If there is a start position, then add a default-tag item up to that start position and add a found-tag item from the start position to n.
def add_item(items, coder, tag, start, n): if start is not None: # close opened items add_zero_item(items, coder, tag, start) # default tag items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag
[ "def _next_ngram_tags(tagged, pos, n):\n if n == 0:\n return ''\n if pos == len(tagged) - 1:\n return '<end>'\n tag = tagged[pos + 1][1]\n return \" \".join([\n tag, _FeatureExtractor._next_ngram_tags(tagged, pos + 1, n - 1)\n ]).strip()", "def _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the sign evaluation message.
def to_msg(self): return SignEvaluationMsg( self.position.to_geometry_msg(), self.desc, *self.evaluate() )
[ "def sign(sk: SecretKey, msgs: List[bytes]) -> Signature:\n assert(len(msgs) == len(sk.y))\n\n # pick generator\n h = G1.generator()\n exponent = sk.x + sum([y_i * Bn.from_binary(m_i)\n for (y_i, m_i) in zip(sk.y.values(), msgs)])\n\n return Signature...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum over the evaluations.
def sum_evaluations(evaluations): def add_evaluations(e1, e2): """Add two evaluations. If Signs do not have detections the distance is -1, therefore the distance needs to be handled separately. """ true_positive = e1[0] + e2[0] false_posi...
[ "def evaluate(self, solution, total = 0):\n for objective in self.objectives:\n total = total + objective(solution)\n return total", "def sum(self) -> float:\n x = sum(self.values) \n return(x)", "def sum(self):\n return sum(self.items())", "def sum_values(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add two evaluations. If Signs do not have detections the distance is -1, therefore the distance needs to be handled separately.
def add_evaluations(e1, e2): true_positive = e1[0] + e2[0] false_positive = e1[1] + e2[1] distance = e1[2] + e2[2] if e1[2] < 0 and e2[2] < 0: distance = -1 elif e1[2] < 0: distance = e2[2] elif e2[2] < 0: ...
[ "def sum_evaluations(evaluations):\n\n def add_evaluations(e1, e2):\n \"\"\"Add two evaluations.\n\n If Signs do not have detections the distance is -1, therefore the distance needs\n to be handled separately.\n \"\"\"\n true_positive = e1[0] + e2[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the evaluation for the given ids.
def get_evaluations(self, ids): evaluations = [(self.signs[i].evaluate(), self.signs[i].desc) for i in ids] descriptions = list({self.signs[i].desc for i in ids}) evaluations_per_sign = [ ([e for e, desc in evaluations if desc == description], description) for description...
[ "def evaluate(self) -> Dict[str, Any]:\n kwargs = {\"ids\": self._ids}\n return {\n metric.value: self._metric_funcs[metric](\n self._targets, self._preds, **kwargs\n )\n for metric in self._metrics\n }", "def compute(self, *args, **kwargs):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the plots from the detected signs.
def create_plots(self): shutil.rmtree(self.param.path, ignore_errors=True) os.makedirs(self.param.path) ids = list(range(len(self.signs))) """True positives""" values, kinds = self.get_evaluations(ids) plots.create_plot( kinds, [e[0] for e in val...
[ "def make_asimov_significance_plots(self):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n outdir = os.path.join(self.outdir, 'Significances')\n mkdir(outdir)\n maintitle = self.make_main_title(\n end='Asimov Analysis Significances',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish an RVIZ marker on the publisher's topic.
def _publish_point_marker( self, point: Point, id: int, ns="simulation/sign_evaluation", ): rospy.logdebug(f"display point {point}") marker = visualization.get_marker( frame_id="sim_world", rgba=[255, 0, 255, 255], id=id, ...
[ "def setupPublishers(self):\n\n self.rviz_marker_publisher = rospy.Publisher(\"/spartan_grasp/visualization_marker\",\n visualization_msgs.msg.Marker, queue_size=1)", "def pub(self, data):\n self.rach.pub(self.topic, data)", "def publish(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the detection to the closest sign.
def add_detection(self, point: Point, desc: str): closest_sign = Sign.closest(self.signs, point) if closest_sign.position.distance(point) < self.param.distance_threshold: closest_sign.detections.append((point, desc)) self.evaluation_publisher.publish(closest_sign.to_msg())
[ "def addtag_closest(self, newtag, x, y, halo=None, start=None):\n self.addtag(newtag, 'closest', x, y, halo, start)", "def closest_detection(detections):\n closest_detection = None\n for det in detections:\n center = detection_center(det)\n if closest_detection is None:\n clo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(DEPRECATED) Generate an HTML5 appcache. Should be run after wq optimize, as some of the manifest entries will be inferred from the build log. Note that browser vendors are deprecating support for Application Cache in favor of Service Workers. The `wq appcache` command will be removed in wq.app 2.0. Use the `wq service...
def appcache(config, version): click.echo("Warning: Application Cache is deprecated by browser vendors.") time.sleep(10) if 'appcache' not in config: raise click.UsageError( "appcache section not found in %s" % config.filename ) if 'optimize' not in config: raise cl...
[ "def setup_caching(app, **kwargs):\n \n cache = SimpleCache(**kwargs)\n setattr(app, 'cache', cache)", "def offline_command(args):\n\n list_local_files()\n\n if os.path.exists(MANIFEST_FILENAME) and not options.force:\n print \"%s already exists (use -f to overwrite).\" % MANIFEST_FILENAME\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a dataframe containing the covariate X and observations Y. The X's are generated uniformly over each of the supplied segments.
def generate_data(func, points, seed=0): np.random.seed(seed) data = [] for segment in points: x = np.linspace(*segment["xlim"], num=segment["n_points"]) distribution = func(x) # Generate observations y = distribution.rvs() df = pd.DataFrame({"x": x, "y": y}) ...
[ "def generate_uniform_data():\n data = pd.DataFrame()\n x = np.array([])\n y = np.array([])\n\n for xval in np.arange(0, 1, 1/100):\n for yval in np.arange(0, 1, 1/100):\n x = np.append(x, xval)\n y = np.append(y, yval)\n data['x'] = x\n data['y'] = y\n comp = discr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like scandir, but recursively. Will skip everything in the skip array, but only at the top level directory. Returns SEntry objects. If in_restricted is true, all returned entries will be marked as restricted even if their permissions are not restricted.
def recursedir(path='.', skip=[], alwaysskip=['.~tmp~'], in_restricted=False): for dentry in scandir(path): if dentry.name in skip: continue if dentry.name in alwaysskip: continue if dentry.name.startswith('.nfs'): continue # Skip things which are...
[ "def scandir(url: str) -> Iterable[DirEntry]:\n authenticated = credentials.authenticate(url)\n return SCANNER_REGISTRY.get_handler(authenticated.scheme).scandir(authenticated)", "def scandir(path='.'):\r\n for name in os.listdir(path):\r\n yield GenericDirEntry(path, name)", "def walk_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split data into train and test sets. The train set is resampled until its stratified label ratio lies between label_ratio_low and label_ratio_high.
def split_stratify_train(data: pd.DataFrame, label_ratio_low: float, label_ratio_high: float, test_size=0.2): while True: X_train, X_test, y_train, y_test = train_test_split(data.drop(columns=['LABEL']), data['LABEL'], test_size=test_size) ...
[ "def create_test_train_split(data, labels):\n strat_split = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\n\n for train_idx, test_idx in strat_split.split(data, labels):\n X_train, X_test = data[train_idx], data[test_idx]\n y_train, y_test = labels[train_idx], labels[test_idx]\n\n pri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train an XGBoost model using scikit-learn RandomizedSearchCV, and output a report. The train set is split into train and validation sets. The validation set is used for early stopping.
def xgb_scikit_random_train(train_X, train_Y, test_X, test_Y): x_train, x_val, y_train, y_val = train_test_split(train_X, train_Y, test_size=0.1) logger.info(f"Train set size: {len(x_train)}, validation set(for early stopping) size: {len(x_val)}") objective = 'binary:logistic' eval_metric = 'logloss' ...
[ "def train_xgb(params, X_train, y_train, cv, scorer='neg_mean_squared_error', seed=42):\n\n n_estimators = int(params[\"n_estimators\"])\n max_depth= int(params[\"max_depth\"])\n\n try:\n model = xgb.XGBRegressor(n_estimators=n_estimators,\n max_depth=max_depth,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Leverage IGDB's API to search for game information.
async def gamelookup(self, ctx, *, game_name = None): if not game_name: return await ctx.send("Usage: `{}gamelookup [game_name]`".format(ctx.prefix)) if not self.access_token or time.time() >= self.expire_time: if not await self._update_token(): return await ctx.send("I...
[ "async def query_by_game(self, game: int, include_global=True) -> List[model]:\n pass", "def game_search(name):\r\n logger.info(\"Searching for %s\" % name)\r\n data = urllib.parse.urlencode({'term': name, 'f': 'games'})\r\n data = data.encode('utf-8')\r\n with urllib.request.urlopen(base_url, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes login as a string. Returns the result of compliance validation as an array of strings; an empty array means the login passes.
def login_validation(login): # Argument must be a string if not isinstance(login, str): raise TypeError("Argument must be a string") result = [] # Check for length if len(login) < 1 or len(login) > 20: result.append("Login length must be between 1 and 20 symbols") # Check if ha...
[ "def hasLoginPasswordExtractor():", "def validate(password):\n length = len(password)\n invalid_chars = [\" \", \"_\", \"-\"]\n spec_chars = list(\"!#$%&'()*+,./:;<=>?@[]^`{|}~\")\n\n # Checks for validitity of the password\n if length < 8:\n return \"Invalid\"\n else:\n for i in p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read and process data stored in the input Excel file, then insert the entries into the DB.
def importXlsxIntoDb(input): #import global variable global UPLOAD_ID global PATIENT_NUM global DATABASE connection = db.create_connection(DATABASE) xlsx = pd.read_excel(input) #looping on each row print(" - Importing data in DB", end = '') for index, row in xlsx.iterrows(): ...
[ "def Excel_Load_Data( self, ExcelFilename ):\n pass", "def import_excel(self):\n self.ensure_one()\n if self.file_import:\n filecontent = base64.b64decode(self.file_import)\n try:\n # Todo: import excel\n input = cStringIO.StringIO()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }