query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Outputs the gameboard as text.
def text_output(self): print(self.board) print()
[ "def display_game_board():\n print(game_board[0] + \" | \" + game_board[1] + \" | \" + game_board[2])\n print(game_board[3] + \" | \" + game_board[4] + \" | \" + game_board[5])\n print(game_board[6] + \" | \" + game_board[7] + \" | \" + game_board[8])", "def printBoard(self):\n for i in range(len(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the color at the specified coordinates on the gameboard.
def get_color(self, coord): return self.board[coord[0], coord[1]]
[ "def getColor(self, x, y):\n if self._checkRange(x, y, \"getColor\"):\n retval = self.pixels[x, y]\n return Color(retval)", "def get_color(self, row, col):\r\n square = self.get_square(row,col)\r\n if square is None:\r\n return None\r\n\r\n color = square.get_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the color at the specified cells on the gameboard.
def set_color(self, color, filled): for cell in filled: self.board[cell[0], cell[1]] = color
[ "def ColorCell(cell, color):\n\n Window.fill(color, cell[0])\n cell[1] = color", "def color_cells(self, cells=None, color=\"A9A9A9\"):\r\n try:\r\n for ind in cells:\r\n shadding_elm = parse_xml(r'<w:shd {0} w:fill=\"{1}\"/>'.format(nsdecls('w'), color))\r\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the new cell has the same color. If so, then it will be added to filled_edges.
def check_if_filled(self, new_cell, cell_color, filled_edges, filled_surrounded): new_cell_color = self.get_color(new_cell) if new_cell_color != cell_color: return False if new_cell not in filled_edges + filled_surrounded: filled_edges.append(new_cell) return Tr...
[ "def good_cell(self):\n self.color = self.good_color", "def _add_match(self, row: int, col: int, rowDelta: int, colDelta: int) -> None:\r\n if self.field[row][col].status != EMPTY:\r\n for i in range(1, 3):\r\n if not self._is_valid_row_num(row + rowDelta * i) \\\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the number of adjacent cells of the specified color.
def get_color_count(self, color, filled): count = 0 for cell in filled: coord_x = cell[1] coord_y = cell[0] # up if coord_y - 1 >= 0: new_cell = (coord_y-1, coord_x) cell_up_color = self.get_color(new_cell) ...
[ "def count_colors(board, color):\n n = 0\n for cell in board:\n if cell == color:\n n += 1\n elif cell == cinv(color):\n n -= 1\n return n", "def countDiff(self, color):\n count = 0\n for y in range(self.n):\n for x in range(self.n):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find apropriate files for the subject and download them Search through all files for all acquisitions for all sessions for this subject and download only the T1 nifti files. If file names are repeated a number is prepended. Troublesome characters in the file name are replaced with "_". The file's original name, full pa...
def find_and_download_files(context): input_path = 'input/' if os.path.isdir(input_path): log.debug('Path already exists: ' + input_path) else: log.debug('Creating: ' + input_path) os.mkdir(input_path) fw = context.client if 'classification_measurement' in context.config:...
[ "def downloadAll():\n _subjects = SubjectsEnum.members()\n for subject in SubjectsEnum.members(): \n print(\"\\n-------------------------------------------------------------\")\n if os.path.exists(SpringerScrapper.download_dir+'\\\\'+subject):\n files = os.listdir(SpringerScrapper.dow...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set final status to last line of reconallstatus.log.
def set_recon_all_status(subject_dir): path = context.gear_dict['output_analysisid_dir'] + '/' + \ subject_dir + '/scripts/recon-all-status.log' if os.path.exists(path): with open(path, 'r') as fh: for line in fh: pass last_line = line else: ...
[ "def final_status(self, final_status):\n\n self._final_status = final_status", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set final status to last line of reconallstatus.log.
def set_recon_all_status(subject_dir): path = context.gear_dict['output_analysisid_dir'] + '/' + \ subject_dir + '/scripts/recon-all-status.log' if os.path.exists(path): with open(path, 'r') as fh: for line in fh: pass last_line = line else: ...
[ "def final_status(self, final_status):\n\n self._final_status = final_status", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given value is "close enough" to the desired value Because sometimes MagneticFieldStrength can be provide in mT (3000, 1500) or something like 2.9721T
def field_strength_close_enough(field_strength, desired_value): if field_strength > 100: # assume it is in mT instead of Teslas field_strength /= 1000 # and turn it into Teslas diff = abs(field_strength - desired_value) if diff < 0.2: return True else: return False
[ "def get_speed_tolerance(self):\n return False #Not supported", "def is_temp_gt_25(self):\r\n return self.temp > 25.0", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def aboveFreezing(self):\r\n if sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Powers or unpowers the bells.
def power_bells(state): if not pinlessMode: if state: for pin in bellPins: GPIO.output(pin, GPIO.HIGH) elif not state: for pin in bellPins: GPIO.output(pin, GPIO.LOW) else: logging.debug("Bell state: " + str(state))
[ "def take_damage(self):\n self.health -= 1", "def at_bleed_tick(self, divisor):\n if self.bleed > 0:\n self.bleed = round(self.bleed / divisor, 0)\n if self.bleed < 2:\n self.bleed = 0", "def applyEnhancement(self):\n self.thing.powerUp(self)", "def useRestore...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rings the school bells in a pattern for the given schedule/time.
def ring_bells(): # Need to get the pattern for this time slot and apply it. curTime = time.strftime("%H:%M") if curTime not in jsonConfig["schedules"][curSchedule]: logging.error("Couldn't find time record for time " + curTime + " in schedule " + curSchedule) return # Obtain the patter...
[ "def greedy_claim_schedule(problem):", "def schedule_rainy_day():\n event_begin = Event(\"BeginRainyDay\", World(), begin_rainy_day)\n Simulation().schedule(event_begin, time=int(expovariate(RAIN_RATE)*60*24))", "def greedy_dynamic_schedule(problem):", "def board(self, stop, time):\n # people wai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reloads the schedule from our json file.
def reload_schedule(): global jsonConfig global curSchedule jsonConfig = None curSchedule = None # Clear currently scheduled bells. schedule.clear("current") logging.debug("Reloading schedule...") with open(jsonFile) as jsonFileHandle: jsonConfig = json.load(jsonFileHandle) ...
[ "def load(self):\n if isfile(self.schedule_file):\n json_data = {}\n with open(self.schedule_file) as f:\n try:\n json_data = json.load(f)\n except Exception as e:\n LOG.error(e)\n current_time = time.time()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Undistort the image using distortion coefficients
def undistort_image(mtx_, dist_, img_): dst = cv2.undistort(img_, mtx_, dist_, None, mtx_) return dst
[ "def undistort(self, image):\n return cv2.undistort(image, self.camera_matrix, self.distortion_coeffs, None, self.camera_matrix)", "def undistort(self, img):\n # in order to use this function, CameraCalibration object has to be created, which means camera calibration has\n # to be done again....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate Perspective and Inverse Perspective Transform Matrices
def calc_transform(src_, dst_): M_ = cv2.getPerspectiveTransform(src_, dst_) Minv_ = cv2.getPerspectiveTransform(dst_, src_) return M_, Minv_
[ "def perspective_transform(self):\n mtx_perp = cv2.getPerspectiveTransform(self.src_points, self.dst_points)\n mtx_perp_inv = cv2.getPerspectiveTransform(self.dst_points, self.src_points)\n return mtx_perp, mtx_perp_inv", "def __calculateTransformationMatrix(self):\n\t\tpts1 = np.float32([\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract all Non Zero Pixels and return X, Y Coordinates
def extract_pixels(img_): non_zero_pixels = np.argwhere(0 < img_) x = non_zero_pixels.T[0].astype(np.float32) y = non_zero_pixels.T[1].astype(np.float32) return x, y
[ "def __create_xyz_points(raster, no_data=-9999):\n y, x = np.where(raster != no_data)\n z = np.extract(raster != no_data, raster)\n\n return x, y, z", "def get_scanner_xy(points, proj):\n\n # Find the pixel corresponding to (x=0,y=0)\n res_x = proj.projector.res_x # 5 pixels / m, 1 px ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get x intercepts for given y value
def get_intercepts(fit, y): x = fit[0] * (y * y) + fit[1] * y + fit[2] return x
[ "def intercept_(self):\n return pd.Series(self.estimator.intercept_, index=self._y_labels)", "def y_to_x(self, y: float) -> [float]:\n return [self.line1.y_to_x(y)[0], self.line2.y_to_x(y)[0]]", "def intercept(x1, y1, x2, y2):\r\n m = slope(x1, y1, x2, y2)\r\n return y1 - m*x1", "def calc_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get Left_x, Right_x, Left_y, Right_y, Image , return Image with Polygon
def draw_polygon(left_x, right_x, left_y, right_y, img_): pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))]) pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))]) pts = np.hstack((pts_left, pts_right)) img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, colo...
[ "def get_pil_format(self):\n corner_points = self._polygon\n corner_points = corner_points[:2] + corner_points[2:][::-1]\n corner_points = [point[::-1] for point in corner_points]\n\n return corner_points", "def generatePolygons():", "def get_image(self, shape):\n output = np....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw Polylines for points with given thickness specified by Window Size
def draw_polylines(input_img, pts, window_size): return cv2.polylines(input_img, np.int_([pts]), isClosed=False, color=(255, 255, 255), thickness=2 * window_size)
[ "def draw_lines(surface, lines, color, width):\n for points in lines:\n draw_line(surface, points, color, width)", "def draw_line(surface, points, color, width, closed=False):\n pg.draw.lines(surface, color, closed, points, width)", "def draw_lines(self, color, points, width = 1, closed = False):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use polyfit from the mask points for smoothening them
def smoothen_masks(fit, img_, window_size): img_size = img_.shape mask_poly = np.zeros_like(img_) # Get top to Bottom for refactoring # mask_y = np.linspace(0, img_size[0] - 1, img_size[0]) mask_x = get_intercepts(fit, mask_y) # Smoothen the mask # pts = coordinates_to_imgpts(mask_x, mask_y...
[ "def polyfitr(x, y, order, clip, xlim=None, ylim=None, mask=None, debug=False):\n\n x = np.asanyarray(x)\n y = np.asanyarray(y)\n isort = x.argsort()\n x, y = x[isort], y[isort]\n\n keep = np.ones(len(x), bool)\n if xlim is not None:\n keep &= (xlim[0] < x) & (x < xlim[1])\n if ylim is n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the mean value of fit "Left" and "Right" based on flag
def get_mean_fit(flag='L'): if flag == 'L': return np.mean(np.vstack(l_coeff_queue), axis =0) if len(l_coeff_queue)>1 else l_coeff_queue[-1] else: return np.mean(np.vstack(r_coeff_queue), axis =0) if len(r_coeff_queue)>1 else r_coeff_queue[-1]
[ "def getMean(tree, i=0, flag=False):\n if tree.right_tree.value is not None:\n tree.right_tree.label_class = getMean(tree.right_tree, i=i+1)\n if tree.left_tree.value is not None:\n tree.left_tree.label_class = getMean(tree.left_tree, i=i+1)\n means = (tree.right_tree.label_class + tree.left_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the Last Fit depending on the flag
def get_last_fit(flag='L'): if flag == 'L': return l_coeff_queue[-1] else: return r_coeff_queue[-1]
[ "def getFit(self):\n if self.fits.has_key('default'):\n return self.fits['default']\n else:\n return None", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def get_last_saved_estimation(self):\n return None", "def get_fit(self, spac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use The current values of Curvature and Offset from Left and Right Lanes to decide if Lanes are sane
def curvature_sanity(left_curvature, left_offset, right_curvature, right_offset): if return_queue_len(flag='L') >= 1 and return_queue_len(flag='R') >= 1: offset = center_position - (left_offset + right_offset) / 2. offset_measure = np.abs(overall_offset - offset) return True if offset_measur...
[ "def GetLoCorner(self):\n ...", "def looking_left(landmarks):\n nose_bridge = landmarks['landmarks'].nose_bridge()\n nose_bottom = landmarks['landmarks'].nose_bottom()\n\n nose_bottom_x_min = np.min(nose_bottom[:,0])\n nose_bottom_x_max = np.max(nose_bottom[:,0])\n\n return nose_bridge[-1][0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the left and right fit
def update_lanewidth(left_fit, right_fit, img_): img_size = img_.shape y_eval = np.linspace(0, img_size[0], 20) left_x = get_intercepts(left_fit, y_eval) right_x = get_intercepts(right_fit, y_eval) return np.clip(right_x - left_x, 400, 800)
[ "def hflip(self):\n self.leftimg, self.rightimg = self.rightimg, self.leftimg", "def _calcStretchFactors(self):\n self.stretchX = math.log10(self.x2 - self.x1 + 1.0)\n self.stretchY = math.log10(self.y2 - self.y1 + 1.0)", "def _update_image_bounderies(self, read_start, read_end):\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an ISO 6346 shipping container code.
def create(owner_code, serial, category='U'): if not (len(owner_code) == 3 and owner_code.isalpha()): raise ValueError("Invalid ISO 6346 owner code '{}'".format(owner_code)) if category not in ('U', 'J', 'Z', 'R'): raise ValueError("Invalid ISO 6346 category identifier '{}'".format(category)) ...
[ "def create_state_id(self):\n # TODO: use fips code\n for key, value in config.fips_dict.iteritems():\n if key == config.state.lower():\n state_num = value\n if state_num <=9:\n state_num = '0' + str(state_num)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the check digit for an ISO 6346 code without that digit
def check_digit(raw_code): s = sum(code(char) * 2**index for index, char in enumerate(raw_code)) return s % 11 % 10
[ "def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)", "def __digit(cls, s_code: str) -> str:\n i = 1\n tmp_sum = 0\n for num in s_code: # number 为0-9\n if 0 == i % 2:\n tmp = int(ord(num))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the ISO 6346 numeric code for a letter.
def letter_code(letter): value = ord(letter.lower()) - ord('a') + 10 return value + value // 11
[ "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def get_ascii(letter):\n\n ascii_val = ord(letter.upper()) # gets the ascii value of a given letter\n\n return ascii_val", "def get_alph...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes
def insert(self, item): self.pool.append(item) if len(self.pool) == self.min_tree_size: self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn)) self.pool = [] while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size: a = self.trees.pop() ...
[ "def bst_insert(sizes):\n tree = rbTree_main.BinarySearchTree();\n for i in range(sizes):\n tree.insert(random.random())", "def test_insert_to_small_tree_existing_num(small_tree):\n small_tree.insert(40, autobalance=False)\n assert small_tree.size() == 6", "def test_insert_to_small_tree_updat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
def nearest(self, query): nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees)) distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool)) best = None best_cost = np.inf for cost, near in nearest_trees + distances_pool: ...
[ "def __Nearest(self):\n mydists = []\n for vertex in self.vertices:\n mydist = self.__dist(vertex, self.Xrand)\n mydists.append(mydist)\n min_dist_index, min_dist = min(enumerate(mydists), key=operator.itemgetter(1))\n min_node = list(self.vertices)[min_dist_index]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)
def neighbourhood(self, query, radius): tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius))) neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees))) return neighbourhood_trees + list(filter(lambda x: self.dist_fn...
[ "def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):\n ratio_within_radius = 1\n threshold = 1 - self.radius_cutoff_ratio\n total_candidates = np.array([], dtype=int)\n total_neighbors = np.array([], dtype=int)\n total_distances = np.array([], dtype=float)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a new pair of products with times purchased together if the pair existed, just increase the times purchased otherwise just add the new pair
def add(self, prod1_name, prod2_name, times): if prod1_name == prod2_name: return try: self._purchased.update({PROD1: prod1_name, PROD2: prod2_name, TIMES: {'$exists': True}}, {'$inc': {TIMES: times}}, True ...
[ "def _append_pairs(new_pairs):\n desired_pairs = restore_pairs() or []\n desired_pairs += new_pairs\n print(\"Adding {} new pairs, queue has {} pairs\".format(len(new_pairs), len(desired_pairs)))\n save_pairs(desired_pairs)", "def add(self, product):\n pass", "def add_points(self, pair1, pair...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a fixed times of the given pair of products with times purchased together
def assign(self, prod1_name, prod2_name, times): try: self._purchased.update({PROD1: prod1_name, PROD2: prod2_name}, {'$set': {TIMES: times}}, True ) self._purchased.update({PROD1: pr...
[ "def get_product_time_bounds(self,\n product: str\n ) -> Tuple[datetime.datetime, datetime.datetime]:", "def promotion(time, sum_price):\n time = second_to_minute(time)\n for (pro, price) in [(24*60, 150), (12*60, 100), (8*60, 80), (3*60, 40), (60...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recommend the next best relevant product
def recommend_next_product(self, prod_list): scores = defaultdict(float) for prod in prod_list: for item in self._purchased.find({PROD1: prod}): if not item[PROD2] in prod_list: scores[item[PROD2]] += math.log(item[TIMES]) if len(scores) == 0: ...
[ "def recommend(self, product, num_of_recommendations):\n # Get prod_user_matrix & map from prepare_clean_data()\n # infer(prod_user_matrix, map, product, num_of_recommendations)\n # returns num_of_recommendations most relevant items based on product\n return\n # ------------------...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trains the classifier model on the training set stored in file trainfile
def train(self, trainfile): sentences_emb,labels=self.read_data(trainfile) logReg = LogisticRegression(penalty="l2",C = 10, multi_class='auto',solver='newton-cg') logReg.fit(sentences_emb,labels) self.clf=logReg
[ "def train(self, trainfile):", "def train(self, trainingSetFileName):\n pass", "def train_classifier(self):\n\n # make sure we have a featureset\n if not self.featureset:\n self.prepare_features()\n print \"* Training {1}\\tsize(training_set) = {0}\\n\".format(len(self.tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience split function for inverted index attributes. Useful for attributes that contain filenames. Splits the given string s into components parts (directories, filename), discarding the extension and all but the last two directories. What's remaining is split into words and the result is returned.
def split_path(s): dirname, filename = os.path.split(s) fname_noext, ext = os.path.splitext(filename) levels = dirname.strip('/').split(os.path.sep)[2:][-2:] return PATH_SPLIT.split(' '.join(levels + [fname_noext]))
[ "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n for part in dirname.strip('/').split(os.path.sep)[2:][-2:] + [fname_noext]:\n for match in PATH_SPLIT.split(part):\n if match:\n yield match", "def _split_filename...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers one or more object attributes and/or multicolumn indexes for the given type name. This function modifies the database as needed to accommodate new indexes and attributes, either by creating the object's tables (in the case of a new object type) or by altering the object's tables to add new columns or indexes....
def register_object_type_attrs(self, type_name, indexes = [], **attrs): if len(indexes) == len(attrs) == 0: raise ValueError, "Must specify indexes or attributes for object type" table_name = "objects_%s" % type_name # First pass over the attributes kwargs, sanity-checking provided...
[ "def register_object_type_attrs(self, type_name, indexes = [], **attrs):\n if len(indexes) == len(attrs) == 0:\n raise ValueError(\"Must specify indexes or attributes for object type\")\n\n table_name = \"objects_%s\" % type_name\n\n # First pass over the attributes kwargs, sanity-ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a new inverted index with the database. An inverted index maps arbitrary terms to objects and allows you to query based on one or more terms. If the inverted index already exists with the given parameters, no action is performed. name is the name of the inverted index and must be alphanumeric. min and max spe...
def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None): # Verify specified name doesn't already exist as some object attribute. for object_name, object_type in self._object_types.items(): if name in object_type[1] and name != object_type[1][name][2]: ...
[ "def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None):\n # Verify specified name doesn't already exist as some object attribute.\n for object_name, object_type in self._object_types.items():\n if name in object_type[1] and name != object_type[1][name]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds an object of type 'object_type' to the database. Parent is a (type, id) tuple which refers to the object's parent. 'object_type' and 'type' is a type name as given to register_object_type_attrs(). attrs kwargs will vary based on object type. ATTR_SIMPLE attributes which a None are not added. This method returns th...
def add(self, object_type, parent = None, **attrs): type_attrs = self._get_type_attrs(object_type) if parent: attrs["parent_type"] = self._get_type_id(parent[0]) attrs["parent_id"] = parent[1] # Increment objectcount for the applicable inverted indexes. inverted_...
[ "def Add(self, obj_type, name, node=None, obj=None):\n print \"Adding object %s, node: %s\" % (name, node)\n #check for duplicate object\n # also raise error if no such object type\n if self.ObjectExists(obj_type, name):\n raise DuplicateObjectError(name)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update an object in the database. For updating, object is identified by a (type, id) tuple or an ObjectRow instance. Parent is a (type, id) tuple or ObjectRow instance, which refers to the object's parent. If specified, the object is reparented, otherwise the parent remains the same as when it was added with add(). att...
def update(self, obj, parent=None, **attrs): if isinstance(obj, ObjectRow): object_type, object_id = obj['type'], obj['id'] else: object_type, object_id = obj type_attrs = self._get_type_attrs(object_type) get_pickle = False # Determine which inverted in...
[ "def do_object_update(ident, model, attrs, data=None):\n \n obj = get_object(ident, model, attrs=attrs)\n if not obj:\n print_notfound(ident, model, attrs)\n return 1\n \n return update_object_bydata(obj, data)", "def update_obj(obj, attributes, params):\n for key in params.keys():...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scores the terms given in terms_list, which is a list of tuples (terms, coeff, split, ivtidx), where terms is the string or sequence of terms to be scored, coeff is the weight to give each term in this part (1.0 is normal), split is the function or regular expression used to split terms (only used if a string is given ...
def _score_terms(self, terms_list): terms_scores = {} total_terms = 0 for terms, coeff, split, ivtidx in terms_list: if not terms: continue # Swap ivtidx name for inverted index definition dict ivtidx = self._inverted_indexes[ivtidx] ...
[ "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes all indexed terms under the specified inverted index for the given object. This function must be called when an object is removed from the database, or when an ATTR_INVERTED_INDEX attribute of an object is being updated (and therefore that inverted index must be reindexed).
def _delete_object_inverted_index_terms(self, (object_type, object_id), ivtidx): self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})
[ "def _delete_object_inverted_index_terms(self, obj, ivtidx):\n object_type, object_id = obj\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def clean(self):\n terms = list(self)\n\n for t in terms:\n self.doc.remove_term(t)",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the dictionary of terms (as computed by _score_terms()) to the specified inverted index database for the given object.
def _add_object_inverted_index_terms(self, (object_type, object_id), ivtidx, terms): if not terms: return # Resolve object type name to id object_type = self._get_type_id(object_type) # Holds any of the given terms that already exist in the database # with their id ...
[ "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the data...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries the inverted index ivtidx for the terms supplied in the terms argument. If terms is a string, it is parsed into individual terms based on the split for the given ivtidx. The terms argument may also be a list or tuple, in which case no parsing is done. The search algorithm tries to optimize for the common case. ...
def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None): t0 = time.time() # Fetch number of files the inverted index applies to. (Used in score # calculations.) objectcount = self._inverted_indexes[ivtidx]['objectcount'] if not isinstance(terms, (list, t...
[ "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. (Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(term...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obtains terms for the given inverted index name. If associated is None, all terms for the inverted index are returned. The return value is a list of 2tuples, where each tuple is (term, count). Count is the total number of objects that term is mapped to. Otherwise, associated is a specified list of terms, and only those...
def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None): if ivtidx not in self._inverted_indexes: raise ValueError, "'%s' is not a registered inverted index." % ivtidx if prefix: where_clause = 'WHERE terms.term >= ? AND terms.term <= ?' where_va...
[ "def enumerate_match(self, prefix: List[str]) -> List[str]:\n matched_terms = []\n cur = self._root\n for i, token in enumerate(prefix):\n if token not in cur.children:\n break\n cur = cur.children[token]\n if cur.is_term:\n item = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that trailing @ used for extracting does not interfere with untag.
def test_untag_with_trailing_extract(self): fields_to_test = { 'foo@': 'bar-base', 'foo@de@': 'bar-de', 'foo@(.*_FR|.*_SG)@': 'bar-fr', 'nested': { 'nested@': 'nested-base', 'nested@de_AT@': 'nested-de', 'nested@(.*_...
[ "def delete_tag(element):\r\n found = re.search(r'@', element)\r\n if found:\r\n return ''\r\n else:\r\n return element", "def mention(result):\n return result.text.find('@') != -1", "def testUnindentedFields(self):\n self.checkParse(\"\"\"\n This is a paragraph.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that not having a base key does not interfere with untag and locales.
def test_untag_with_no_base(self): fields_to_test = { 'foo@de': 'bar-de', 'baz@de': { 'fum@de': 'boo-de' }, } fields = copy.deepcopy(fields_to_test) self.assertDictEqual({}, document_fields.DocumentFields.untag(fields)) self.ass...
[ "def test_invalid_locale_keys(self):\n # Banned locale element.\n self.data[\"locales\"][\"es\"][\"default_locale\"] = \"foo\"\n self.analyze()\n self.assert_failed(with_warnings=True)", "def test_natural_key_no_platform():\n with LogCapture() as log_capture:\n Accoun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Untag when there is a none value for the tagged value.
def test_untag_none(self): untag = document_fields.DocumentFields.untag fields_to_test = { 'foo': 'base', 'foo@env.prod': None, } fields = copy.deepcopy(fields_to_test) self.assertDictEqual({ 'foo': 'base', }, untag(fields, locale=None,...
[ "def tag(self, value):\r\n self._tag = value if value is not None else None", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'user@example.com')\n namespace = createNamespace(user, u'name')\n tag ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Summary of the time series. include mean, std, max, min and range
def summaryone(x): print 'mean and std are ',np.mean(x), np.std(x) print 'max and min are ',np.max(x), np.min(x) print 'the range is ',np.max(x)-np.min(x)
[ "def _summary_stats(data):\n \n stats = {'min':[],'max':[], 'mean':[]}\n \n for scan in range(len(data)):\n stats['min'].append(\n (scan, min(data[scan][1]))\n )\n stats['max'].append(\n (scan, max(data[scan][...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and returns a MySQL database engine.
def create_mysql_engine(dbname, prod=True, driver="pymysql"): db_config = toolbox.open_system_config(prod=prod, config_type="DB_CONFIG")[dbname] db_url = URL( drivername="mysql+{}".format(driver), username=db_config.get("username"), password=db_config.get("password"), host=db_con...
[ "def __getDbEngine():\n\t## http://www.sqlalchemy.org/docs/dialects/mysql.html#module-sqlalchemy.dialects.mysql.mysqldb\n\t## mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>\n\tengine = sqlalchemy.create_engine('mysql+mysqldb://' + user + ':' + password + '@' + host + ':' + port + '/' + dbname)\n\treturn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and returns a connection to a Microsoft SQL Server database.
def create_mssql_connection( dbname, prod=True, driver="{ODBC Driver 17 for SQL Server}", driver_type="pyodbc" ): db_config = toolbox.open_system_config(prod=prod, config_type="DB_CONFIG")[dbname] if driver_type == "pyodbc": connection = pyodbc.connect( driver=driver, server=...
[ "def mssqlconn():\n return _mssql.connect(\n server=OPTIONS.get('db', 'server'),\n user=OPTIONS.get('db', 'user'),\n password=OPTIONS.get('db', 'password'),\n database=OPTIONS.get('db', 'database'),\n port=OPTIONS.getint('db', 'port'),\n )", "def createConnection(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fix the dates for the CEMS data Three date/datetime changes (not all implemented) Make op_date a DATE type Make an appropriate INTERVAL type (not implemented) Add a UTC timestamp (not implemented)
def fix_up_dates(df): # Convert to interval: # df = convert_time_to_interval(df) # Convert op_date and op_hour from string and integer to datetime: # Note that doing this conversion, rather than reading the CSV with # `parse_dates=True` is >10x faster. # Make an operating timestamp df["oper...
[ "def fix_types(self, french_decs=None): \n \n def fix_time_cols(self):\n # Fix TIme Columns\n try:\n # fix time cols\n for c in self.time_cols:\n self.df[c] = pd.to_datetime(self.df[c], format='%H:%M:%S')\n except:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether every element in the series is either missing or in values This is fiddly because isin() changes behavior if the series is totally NaN (because of type issues)
def _all_na_or_values(series, values): series_excl_na = series[series.notna()] if not len(series_excl_na): out = True elif series_excl_na.isin(values).all(): out = True else: out = False return out
[ "def is_empty(series):\n return series.isna().all()", "def checkNaN(data):\n if data.isnull().values.any():\n N = data.isnull().sum().sum()\n print(\"There are {} missing values.\".format(N))", "def pd_isnan(val):\n return val is None or val != val", "def isnan(q):\n return np.any(np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Drop these calculated rates because they don't provide any information. If you want these, you can just use a view.
def drop_calculated_rates(df): if not _all_na_or_values(df["so2_rate_measure_flg"], {"Calculated"}): raise AssertionError() if not _all_na_or_values(df["co2_rate_measure_flg"], {"Calculated"}): raise AssertionError() del df["so2_rate_measure_flg"], df["so2_rate_lbs_mmbtu"] del df["co2_...
[ "def clearCosts(self):\n self.Rate = 0", "def rates(self):\n return self._rates", "def clear_rate_BCs(self):\n self.set_BC(pores=None, bctype='rate', mode='remove')", "def get_zero_rates(self):\r\n self.__bootstrap_zero_coupons__()\r\n self.__get_bond_spot_rates__()\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the bins used in the Riemann sum over metallicities
def calculateMetallicityBinEdges(self): if self.binInLogSpace: logMetallicities = np.log10(self.metallicityGrid) b= logMetallicities[:-1] + (logMetallicities[1:] - logMetallicities[:-1])/2. b = 10.**b #the boundaries for integration are not in log space so ...
[ "def get_edges_metallicity_bins(self):\n met_val = np.log10(self.get_centers_metallicity_bins())\n bin_met = np.zeros(len(met_val)+1)\n # if more than one metallicty bin\n if len(met_val) > 1 :\n bin_met[0] = met_val[0] - (met_val[1] - met_val[0]) / 2.\n bin_met[-1]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make component fields, other info into dict for template context
def make_context( container: ServiceContainer, component_name: str, **kwargs ) -> Dict[str, Any]: from wired_components.component import IWrapComponents, IComponent # Start with all the wrapped components context: Dict[str, Any] = container.get(IWrapComponents) # We get the co...
[ "def setup_template_variables(self, context, data_dict):", "def construct_content(self) -> ty.Dict[str, ty.List['Component']]:\n return {}", "def variables_for_template(self):\n\n return {}", "def custom_get_template_values(self, field, cstruct, kw):\n values = {'cstruct': cstruct, 'field': f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch training of the model with a set of hyperparameters in parent_dir/job_name
def launch_training_job(model_dir,job_name, params, implementation_dir): # Create a new folder in implementation corresponding to the model implementation_dir = os.path.join(implementation_dir, os.path.basename(os.path.normpath(model_dir))) if not os.path.exists(implementation_dir): os.makedirs(impl...
[ "def launch_training_job(parent_dir, job_name, params):\r\n # Create a new folder in parent_dir with unique_name \"job_name\"\r\n model_dir = os.path.join(parent_dir, job_name)\r\n if not os.path.exists(model_dir):\r\n os.makedirs(model_dir)\r\n\r\n # Write parameters in json file\r\n json_pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a positive list of integers along with a target and returns a subset of
def diophantine_subset_sum(number_list, target, time_limit=TIME_LIMIT): started_at = time.time() # Sort numbers list. number_list = sorted(number_list) # Build sums list. sums_list = [number_list[0]] for n in range(1, len(number_list)): sums_list.append(number_list[n] + sums_list[n-1])...
[ "def _select_sublist(lst, target):\n ln = len(lst)\n\n # Generate an array that indicates the decision bit for each element in the list.\n # If an element is deterministically true, then no decision bit is needed.\n choice_bits = [None] * ln\n x = 0\n for i in range(0, ln):\n if lst[i][1] n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert unit conversion with custom UnitRegistry
def test_convert_unit_with_custom_registry(test_df): df = get_units_test_df(test_df).rename(unit={"EJ/yr": "foo"}) # check that conversion fails with application registry with pytest.raises(pint.UndefinedUnitError): df.convert_unit("foo", "baz") # define a custom unit registry ureg = pint....
[ "def si_unit_conversion(self):\n pass", "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def to_unit(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
insert a column to tb. if called, all operation related to db must be fitted.
def insert_column(self, tb_name, column_name, data_type): sentences = f""" ALTER TABLE {tb_name} ADD COLUMN {column_name} {data_type}; """ print(sentences) self.commit(sentences)
[ "def _addColumn(self, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + TABLE_NAME + \" ADD COLUMN \" + str(column) + \" \" + self.getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def ad...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Throw double buffer into widget drawable
def on_draw(self, widget, cr): #print "starting to draw" if self.double_buffer is not None: self.draw_tiles() cr.set_source_surface(self.double_buffer, 0.0, 0.0) cr.paint() else: print('Invalid double buffer') #print "done drawing" ...
[ "def draw_bitmap(self, bitmap, x=0, y=0):\r\n height, width = bitmap.shape\r\n self.back_buffer[y:y + height, x:x + width] = bitmap", "def _createNewBuffer(self):\n\n\n if not self.window:\n g.trace('no window !!!!!!!!!!!!!!!!')\n g.trace(g.callers())\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the double buffer based on size of the widget
def on_configure(self, widget, event, data=None): print "reconfiguring" # Destroy previous buffer if self.double_buffer is not None: self.double_buffer.finish() self.double_buffer = None # Create a new buffer self.double_buffer = cairo.ImageSurface(cairo....
[ "def __clicked_btn_buffer_size(self):\n text = self.__ui.le_buffer_size.text()\n self.__set_buffer_size(text)", "def set_size(self, *args, **kwargs):\n return _qtgui_swig.ber_sink_b_set_size(self, *args, **kwargs)", "def on_resize(self, event):\n # update / initialize height and ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows a category item
def showItem(category_item_id): return render_template('item.html', item=db.findItem(id=category_item_id))
[ "def view_category(cat_id):\n session['target'] = url_for('view_category', cat_id=cat_id)\n sqlsession = SQLSESSION()\n category = sqlsession.query(Category).filter_by(id=cat_id).first()\n categories = sqlsession.query(Category).all()\n items = sqlsession.query(Item).filter_by(category_id=cat_id).all...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allow user to create new catalog item
def newItem(): if request.method == 'POST': db.createItem( title=request.form['title'], description=request.form['description'], category_id=request.form['category'], user_id=login_session['user_id']) flash("New catalog item created!", 'success') ...
[ "def create_item(self, user: User, **kwargs) -> None:", "def addCatalogItem(sport_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n if request.method == 'POST':\n newCatalogItem = Item(\n name=request.form['itemName'],\n description=request.form['itemDescripti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows user to edit an existing category item
def editItem(category_item_id): editedItem = db.findItem(id=category_item_id) if editedItem.user_id != login_session['user_id']: return not_authorized() if request.method == 'POST': db.updateItem(editedItem, request.form) return redirect(url_for('showCatalog')) return render_temp...
[ "def editItem(category_name, item_name):\n if 'username' not in session:\n return redirect(url_for('login'))\n \n editedItem = cursor.query(Item).filter_by(name=item_name).first()\n category = cursor.query(Category).filter_by(name=category_name).first()\n if request.method == 'POST':\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows user to delete an existing category item
def deleteItem(category_item_id): itemToDelete = db.findItem(id=category_item_id) if itemToDelete.user_id != login_session['user_id']: return not_authorized() if request.method == 'POST': db.deleteItem(itemToDelete) flash('%s Successfully Deleted' % itemToDelete.title, 'success') ...
[ "def deleteItem(category_name, item_name):\n if 'username' not in session:\n return redirect(url_for('login'))\n \n itemToDelete = cursor.query(Item).filter_by(name=item_name).first()\n category = cursor.query(Category).filter_by(name=category_name).first()\n if request.method == 'POST':\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute average return and of steps.
def compute_avg_return_and_steps(environment, policy, num_episodes=10): total_return = 0.0 total_steps = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 episode_steps = 0.0 while not time_step.is_last(): action_step = polic...
[ "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def average(self):\n return (self.current + self.last) / 2.0", "def _get_average(self):\r\n if self.at_bats == 0:\r\n return 0.0\r\n\r\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the camera's mac address as the serial number.
def serial_number(self) -> str: return self.mac_address
[ "def mac(self) -> str:\n return self.camera_info[\"wifi_mac\"]", "def get_mac():\n mac_int = uuid.getnode()\n mac_str = _mac_int_to_str(mac_int)\n return mac_str", "def mac_address(self) -> str:\n return self._device.mac", "def serial(self) -> str:\n return self.camera_info[\"dev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if capture clip on motion is active.
def capture_clip_on_motion(self) -> bool: return self.data[Attribute.CAPTURE_CLIP_ON_MOTION]
[ "def capture_is_active(self):\n return self.um in self._streams", "def get_capturing(self):\n from ctypes import c_uint32,byref\n if self.handle == None: return False\n is_started = c_uint32()\n status = PvAPI.PvCaptureQuery (self.handle,byref(is_started))\n if status != ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if privacy mode is active.
def is_in_privacy_mode(self) -> bool: return self.data[Attribute.CAMERA_PRIVACY]
[ "def privacy_mode(self):\n return self._attrs.get('privacy_mode')", "def is_public(self):\n return self.privacy == Privacy.PUBLIC", "def is_aprentice(self):\n return self.user_profile_status == self.APPRENTICE", "async def set_privacy_mode(self, status):\n await self._set_config(pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Camera's wireless signal strength.
def wireless_signal_strength(self) -> int: return self.data[Attribute.WIRELESS_SIGNAL_STRENGTH]
[ "def signal_strength(self):\n strength = self.interface.get_signal_strength()\n self.logger.debug(\"Returning signal strength: \" + str(strength))\n return strength", "def signal_strength_percentage(self):\n return self._attrs.get('wifi_signal_strength')", "def GetCurrentSignalStreng...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request a new thumbnail for the camera.
async def request_thumbnail(self) -> None: await self.vivintskyapi.request_camera_thumbnail( self.alarm_panel.id, self.alarm_panel.partition_id, self.id )
[ "async def request_new_image(blink, network, camera_id):\n url = f\"{blink.urls.base_url}/network/{network}/camera/{camera_id}/thumbnail\"\n return await http_post(blink, url)", "def get_thumbnail(self) -> Image.Image:\n logging.info(\"Getting thumbnail...\")\n t = sorted(\n self._t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the latest camera thumbnail URL.
async def get_thumbnail_url(self) -> str: # Sometimes this date field comes back with a "Z" at the end # and sometimes it doesn't, so let's just safely remove it. camera_thumbnail_date = datetime.strptime( self.data[Attribute.CAMERA_THUMBNAIL_DATE].replace("Z", ""), "%Y-%...
[ "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n vine_url = self.get_url()\n res = self._http_request(vine_url)\n m = re.search(r'property=\"og:image\" content=\"(?P<thumbnail...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the rtsp URL for the camera.
async def get_rtsp_url(self, internal: bool = False, hd: bool = False) -> str: credentials = await self.alarm_panel.get_panel_credentials() url = self.data[f"c{'i' if internal else 'e'}u{'' if hd else 's'}"][0] return f"{url[:7]}{credentials[PanelCredentialAttribute.NAME]}:{credentials[PanelCred...
[ "def rtsp_stream_url(self) -> str:\n return self.properties.get(MessageField.RTSP_STREAM_URL.value)", "async def get_direct_rtsp_url(self, hd: bool = False) -> str:\n return (\n f\"rtsp://{self.data[Attribute.USERNAME]}:{self.data[Attribute.PASSWORD]}@{self.ip_address}:{self.data[Attribut...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the direct rtsp url for this camera, in HD if requested, if any.
async def get_direct_rtsp_url(self, hd: bool = False) -> str: return ( f"rtsp://{self.data[Attribute.USERNAME]}:{self.data[Attribute.PASSWORD]}@{self.ip_address}:{self.data[Attribute.CAMERA_IP_PORT]}/{self.data[Attribute.CAMERA_DIRECT_STREAM_PATH if hd else Attribute.CAMERA_DIRECT_STREAM_PATH_STANDA...
[ "def rtsp_stream_url(self) -> str:\n return self.properties.get(MessageField.RTSP_STREAM_URL.value)", "async def get_rtsp_url(self, internal: bool = False, hd: bool = False) -> str:\n credentials = await self.alarm_panel.get_panel_credentials()\n url = self.data[f\"c{'i' if internal else 'e'}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle a pubnub message addressed to this camera.
def handle_pubnub_message(self, message: dict) -> None: super().handle_pubnub_message(message) event = None if message.get(Attribute.CAMERA_THUMBNAIL_DATE): event = THUMBNAIL_READY elif message.get(Attribute.DING_DONG): event = DOORBELL_DING elif message...
[ "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def received_message(self, message):\r\n pass", "def handle(self, message: Message) -> None:", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 ==...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all .js files in the project root folder The project file is not included.
def get_all_js_files(self, root): res = [] for fname in os.listdir(root): mo = re.match(r'(\w+)\.js$', fname) if mo: res.append({ 'name': mo.group(1), 'src': file_contents(os.path.join(root, mo.group())) }) ...
[ "def javascript_files(self):\n return None", "def find_csprojs(root):\n return glob(f'{root}/*/**/*.csproj')", "def javascript_files(self):\r\n resourceManager = ResourcesManager.Instance()\r\n return resourceManager.GetAssetPaths('view_js') + resourceManager.GetAssetPaths('auth_js')", "def coff...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
if we've got a cropping annotation for the given fieldname and scale, set self._rescale to False, to prevent plone.app.imaging traverser to overwrite our cropped scale since the self.modified() method does not know about the currently requested scale name, we need to use the _rescale property
def _need_rescale(self, fieldname, scale): cropped = IAnnotations(self.context).get(PAI_STORAGE_KEY) if cropped and '%s_%s' % (fieldname, scale) in cropped: self._allow_rescale = False else: self._allow_rescale = True
[ "def scale(self, scale):\n self.image.scale(scale)\n self.rescale_annotations(scaling_factor=(scale, scale))", "def modified(self):\n if self._allow_rescale:\n return super(NamedfileImageScaling, self).modified()\n else:\n return 1", "def modifie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
we overwrite the default method that would return the modification time of the context, to return a way back modification time in case the currently requested scale is a cropped scale. (so plone.scale does not create a new scale w/o cropping information
def modified(self): if self._allow_rescale: return super(ImageScaling, self).modified() else: return 1
[ "def modified(self):\n if self._allow_rescale:\n return super(NamedfileImageScaling, self).modified()\n else:\n return 1", "def modified(self):\n if self._allow_rescale:\n return super(ImageScalingAT, self).modified()\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
we overwrite the default method that would return the modification time of the context, to return a way back modification time in case the currently requested scale is a cropped scale. (so plone.scale does not create a new scale w/o cropping information
def modified(self): if self._allow_rescale: return super(NamedfileImageScaling, self).modified() else: return 1
[ "def modified(self):\n if self._allow_rescale:\n return super(ImageScaling, self).modified()\n else:\n return 1", "def modified(self):\n if self._allow_rescale:\n return super(ImageScalingAT, self).modified()\n else:\n return 1", "def last_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the pair and reload data if its new.
def set_pair(self, pair: Pair): if pair != self.pair: self.pair = pair self.load_candles()
[ "def set_pair(self, pair: StudentPair) -> None:\n self._edit_pair = pair\n self.line_edit_title.setText(str(self._edit_pair[\"title\"]))\n self.line_edit_lecturer.setText(str(self._edit_pair[\"lecturer\"]))\n self.combo_box_type.setCurrentText(str(self._edit_pair[\"type\"]))\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the granularity and reload data if its new.
def set_gran(self, gran: Gran): if gran != self.gran: self.gran = gran self.load_candles()
[ "def granularity(self, val):\n self.__granularity = val", "def reset_granularity() -> None:\n global SYSTEM_GRANULARITY # pylint: disable=global-statement\n SYSTEM_GRANULARITY = DEFAULT_GRANULATITY", "def inGranularity(self: ActualQuery, granularity: Granularity) -> ActualQuery:\n self._que...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the quote kind and reload data if its new.
def set_quote_kind(self, quote_kind: QuoteKind): if quote_kind != self.quote_kind: self.quote_kind = quote_kind if self.geo is None: self.load_candles() else: self.geo.update(quote_kind=quote_kind) self.chart.redraw(self.geo)
[ "def set_quote(self, quote):\n self.quote = quote", "def quote(self, quote):\n\n self._quote = quote", "def update_quote(self):\n quote = self.get_quote()\n self.api.update_profile(description=quote)\n self.current_quote = quote\n\n return True", "def quotes(self, quo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of completion strings Simple completion based on pythonlike identifiers and whitespace
def get_completions(self, info): items = [] if (info.line.strip().startswith(('import ', 'from ')) and info.is_python_like): items += module_completion(info.line, [info.filename]) elif info.obj: base = info.obj tokens = set(info.split_wo...
[ "def completenames(self, text, line, begidx, endidx):\n command = text\n if self.case_insensitive:\n command = text.lower()\n\n # Call super class method. Need to do it this way for Python 2 and 3 compatibility\n cmd_completion = cmd.Cmd.completenames(self, command)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the definition for an object within a set of source code This is used to find the path of pythonlike modules (e.g. cython and enaml) for a goto definition
def get_definition(self, info): token = info.obj lines = info.lines source_code = info.source_code filename = info.filename line_nr = None if '.' in token: token = token.split('.')[-1] line_nr = get_definition_with_regex(source_code, token,...
[ "def _REPL_findsource(obj):\n return dill.source.findsource(obj)", "def _get_defined_in(py_object, parser_config):\n # Every page gets a note about where this object is defined\n # TODO(wicke): If py_object is decorated, get the decorated object instead.\n # TODO(wicke): Only use decorators that support thi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate a module path based on an import line in an pythonlike file import_line is the line of source code containing the import alt_path specifies an alternate base path for the module stop_token specifies the desired name to stop on This is used to a find the path to pythonlike modules (e.g. cython and enaml) for a go...
def python_like_mod_finder(import_line, alt_path=None, stop_token=None): if stop_token and '.' in stop_token: stop_token = stop_token.split('.')[-1] tokens = re.split(r'\W', import_line) if tokens[0] in ['from', 'import']: # find the base location tr...
[ "def edit_import_line(importline):\n filename = find_source_file_from_import_line(importline)\n if filename:\n ed = get_editor()\n return run_config(ed, filename)\n else:\n print (\"Could not find source for {0}.\".format(importline.strip()), file=sys.stderr)", "def moduleCompletion(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the definition of an object within a source closest to a given line
def get_definition_with_regex(source, token, start_line=-1): if not token: return None if DEBUG_EDITOR: t0 = time.time() patterns = [ # python / cython keyword definitions '^c?import.*\W{0}{1}', 'from.*\W{0}\W.*c?import ', 'from .* c?i...
[ "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def loc(line, source):\n\n\t# Initialize list of used lines\n\tif not hasattr(loc, 'used'):\n\t\tloc.used = []\n\n\t# spl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all pythonlike extensions
def python_like_exts(): exts = [] for lang in sourcecode.PYTHON_LIKE_LANGUAGES: exts.extend(list(sourcecode.ALL_LANGUAGES[lang])) return ['.' + ext for ext in exts]
[ "def all_editable_exts():\r\n exts = []\r\n for (language, extensions) in sourcecode.ALL_LANGUAGES.items():\r\n exts.extend(list(extensions))\r\n return ['.' + ext for ext in exts]", "def get_extensions(ls_args):\n return {get_ext(line) for line in get_ls_lines(ls_args) if has_ext(line)}", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all editable extensions
def all_editable_exts(): exts = [] for (language, extensions) in sourcecode.ALL_LANGUAGES.items(): exts.extend(list(extensions)) return ['.' + ext for ext in exts]
[ "def do_list_extensions(client, _args):\n extensions = client.list_extensions.show_all()\n fields = [\"Name\", \"Summary\", \"Alias\", \"Updated\"]\n utils.print_list(extensions, fields)", "def list_extensions(self, **_params):\r\n return self.get(self.extensions_path, params=_params)", "def ext...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the recipe for AWS>GCP disk copy.
def testRunRecipe(self): warnings.filterwarnings( action="ignore", message="unclosed", category=ResourceWarning) # Load the recipe, set the arguments, and run self.test_state.LoadRecipe(RECIPE, TEST_MODULES) self.test_state.command_line_options = { 'aws_region': self.aws_region, 'gc...
[ "def test_cont_copy(self):\n # pylint: disable=consider-using-with\n\n # Create a temporary directory, with one file into it and copy it into\n # the container. Check the return-code only, do not verify the data.\n # tempfile() will remove the directory on completion.\n src_dir =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes an AWS EBS Snapshot with ID `id`.
def _removeAWSSnapshot(self, snap_id: str): log.warning(f'Deleting AWS EBS Snapshot {snap_id}') ec2_client = boto3.client('ec2', region_name=self.aws_region) try: ec2_client.delete_snapshot(SnapshotId=snap_id) except Exception as error: # pylint: disable=broad-except log.error(f'Failed to d...
[ "def delete(self):\n assert 'id' in self\n url = '/'.join([get_url('cloudblockstorage'), 'snapshots',\n str(self['id'])])\n handle_request('delete', url)", "def _delete_snapshot(self, vol_id, snap_id, **kwargs):\n\n return self._snap_api_submit(vol_id, snap_id, method='DELET...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes an S3 object at `path`.
def _removeAWSS3Object(self, path: str): log.warning(f'Deleting AWS S3 object {path}') bucket, key = storage_utils.SplitStoragePath(path) s3_client = boto3.client('s3') try: s3_client.delete_object(Bucket=bucket, Key=key) except Exception as error: # pylint: disable=broad-except log.err...
[ "def delete_object(self, s3_path):\n logging.info(\"Deleting \\\"{}\\\" file from S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.ObjectSummary(bucket_name, key).delete()", "def delete(self, path):\n bucket, object_path = parse_gcs_path(path...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a GCS object at `path`.
def _removeGCSObject(self, path: str): log.warning(f'Deleting GCS object {path}') try: storage.GoogleCloudStorage(self.gcp_project_id).DeleteObject(path) except Exception as error: # pylint: disable=broad-except log.error(f'Failed to delete GCS Object {path}: {str(error)}')
[ "def delete(self, path):\n bucket, object_path = parse_gcs_path(path)\n request = storage.StorageObjectsDeleteRequest(\n bucket=bucket, object=object_path)\n try:\n self.client.objects.Delete(request)\n except HttpError as http_error:\n if http_error.status_code == 404:\n # Retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove GCE Image with name `name`.
def _removeGCEImage(self, name: str): log.warning(f'Deleting GCE Image {name}') try: compute.GoogleComputeImage( self.gcp_project_id, self.gcp_zone, name ).Delete() except Exception as error: # pylint: disable=broad-except log.error(f'Failed to delete GCE Image {name}: {str(erro...
[ "def delete_image_by_name(self, name):\n image_info = self.get_image_info(name)\n if image_info is None:\n raise RuntimeError('No such image on the server')\n normalized_url = image_info['links']['normalized']\n self.delete_image(normalized_url)", "def del_image(self, name):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the disk with name `name`.
def _removeGCEDisk(self, name: str): log.warning(f'Deleting GCE Disk {name}') try: gce_disk_client = common.GoogleCloudComputeClient( project_id=self.gcp_project_id).GceApi().disks() gce_disk_client.delete( project=self.gcp_project_id, zone=self.gcp_zone, disk...
[ "def delete_disk(diskName=None):\n pass", "def delete_disk(self, disk_name):\n try:\n # Get the ceph image name.\n ceph_img_name = self.__get_ceph_image_name(disk_name)\n except DBException as e:\n logger.exception('')\n return self.__return_error(e)\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add blogpost to manuscript
def add_blogpost(manuscript, subject, url): line_number = 0 with open(manuscript, "r") as file: lines = file.readlines() for line in lines: if ("## ブロマガ全集" in line): lines.insert(line_number + 2, f"- [{subject}]({url})\n") with open(manuscript, "w") as file: ...
[ "def add_post():\n # Implement me!\n print \"adding post\"\n p_id = db.post.insert(\n post_content=request.vars.post_content\n )\n p = db.post(p_id)\n return response.json(dict(post=p))", "def add_post():\n\n post_id = db.post.insert(post_content=request.vars.post_content)\n\n post ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates whether the passed dict matches the complex logic of the LogicNode
def eval_logic(self, checkDict): result = True #gets individual evaluations from children passList = [] for child in self.children: myVal = child.eval_comparison(checkDict) passList.append(child.eval_comparison(checkDict)) #if only one child returns the o...
[ "def eval_match_conditions(self):\r\n for condition, item in self.conds.iteritems():\r\n self.is_valid_keys(item.keys())\r\n if condition() == self.op.CONTINUE:\r\n continue\r\n if condition() == self.op.PASS:\r\n pass", "def test_RestrictingNo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add the keeper to the player's field
def play(self, game, playerNumber): # needs check for keeper limit? super(Keeper, self).play(game, playerNumber) p = game.players[playerNumber] p.field.add(self)
[ "def add_player(self, player):\n\t\tself.players.append(player)", "def addPlayer(self, player):\n self.players.append(player)", "def add_player(self, players):\n try:\n players[self.ward]\n except:\n players[self.ward] = self", "def add_played_disk(self, x, y, player...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `model_fn` closure for TPUEstimator. model_fn_builder actually creates the model function using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps): def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" input_ids = features["input_ids"] input_mask = features["input_mask"] ...
[ "def model_fn_builder(self):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines input column according to model (raw text or clean text)
def define_input_output(self): if self.classifier_model.text_preprocessing == ClassifierModel.TEXT_CLEAN: self.clean_range() self.input_col = self.df["clean_text"] elif self.classifier_model.text_preprocessing == ClassifierModel.TEXT_RAW: self.input_col = self.df[se...
[ "def reconstruct_input_ext(self, model_in):", "def send_input_text_to_model(self, input_text: str) -> Dict[str, str]:\n pass", "def _build_data_from_text(self, text):\n # get CSV field\n text = text.split(self._data_sep)[self._data_col]\n # tokenize\n return super()._build_dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a saved state value, None if item is undefined.
def __getitem__(self, item): return self._state["data"].get(item, None)
[ "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def GetItem3StateValue(self, item):\r\n\r\n return item.Get3StateValue()", "def state(self, key):\n\n if key in st.session_state:\n return st.session_state[key]\n\n return None", "def extra_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference.
def interleave_keys(a, b): def interleave(args): return ''.join([x for t in zip(*args) for x in t]) return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)
[ "def addKey(s1, s2): \r\n return [i ^ j for i, j in zip(s1, s2)]", "def concatKey(str1,str2):\n return concat(concat(str1, '_'), str2)", "def merge_and_permute(left, right):\n\n keys48 = [-1] # normalize indexes\n for key in range(1, 17):\n concat = (left[key] << 28) | right[key]\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }