Columns: query (string, lengths 9 to 9.05k); document (string, lengths 10 to 222k); negatives (list, 19 to 20 items); metadata (dict)
Creates a millisecond-based timestamp of UTC now.
def get_utc_now_timestamp() -> int: return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000)
[ "def utc_now_ms():\n return round((datetime.utcnow() - EPOCH).total_seconds() * 1e3)", "def timestamp_ms():\n dt = datetime.now()\n return int((mktime(dt.timetuple()) + dt.microsecond / 1e6) * 1000)", "def now() -> datetime:\n now = datetime.now(tz=timezone.utc)\n return now.replace(micro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
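A brief usage sketch for get_utc_now_timestamp above (standard library only); the result is milliseconds since the Unix epoch:

import datetime

def get_utc_now_timestamp() -> int:
    return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000)

ts = get_utc_now_timestamp()
print(ts)          # 13-digit integer for current dates
print(ts / 1000)   # divide by 1000 to recover seconds since the epoch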
usable only when g_wc was used to find pr_wv
def get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode): cnt_list = [] for b, g_wc1 in enumerate(g_wc): pr_wn1 = len(pr_sql_i[b]["conds"]) g_wn1 = g_wn[b] # Now sorting. # Sort based on the wc sequence. if mode == 'test': idx1 = argsort(array(g_wc1)) ...
[ "def set_wv_dict(self, wv_dict):\r\n self.wv_dict = wv_dict", "def build(self,\n wv, # wvo = an intance of dense word vectors\n sense_dim_num=10000, # unused\n save_pkl=True, # unused\n norm_type=\"sum\",\n weight_type=\"score\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get master monitor pid
def get_pid(ssh): pid_file_path = data_dir.MM_PID_DIR+"master_monitord.pid" # get the file path of master_monitord.pid cmd = "sudo cat %s" % pid_file_path # build the cat command s_stdin, s_stdout, s_stderr = ssh.exec_command(cmd) # run the command over ssh return s_stdout.read() #pid, error = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate...
[ "def get_pid():\n\tpid_file_path = data_dir.MM_PID_DIR+\"master_monitord.pid\"\n\tcmd = \"sudo cat %s\" % pid_file_path\n\tpid, error = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate()\n\tif error == \"\":\n\t\treturn int(pid)\n\treturn False", "def get_host_master_id(self):\r\n return s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch an HTML page from url and store it at cache_path
def get_page_and_store(url, cache_path=None): page = urllib2.urlopen(url).read() if cache_path is not None: open(cache_path, 'w').write(page) return page
[ "def fetch_article(directory_path, url):\n opener = urllib.build_opener()\n opener.addheaders = [(\"Accept-Charset\", \"utf-8\")]\n\n response = opener.open(url)\n html_data = response.read()\n response.close()\n\n path = join(directory_path, \"article.html\")\n with open(path, \"wb\") as file:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
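The document above targets Python 2 (urllib2); a minimal Python 3 sketch of the same fetch-and-cache idea, names kept from the original and binary mode assumed so the returned bytes round-trip cleanly:

import urllib.request

def get_page_and_store(url, cache_path=None):
    # urlopen().read() returns bytes in Python 3, so cache in binary mode
    page = urllib.request.urlopen(url).read()
    if cache_path is not None:
        with open(cache_path, 'wb') as f:
            f.write(page)
    return page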
Return a list of URLs of infobox pages
def get_infobox_urls(mapping_page): pattern = re.compile('index\.php/Mapping_en:Infobox_[-\w\./]+') return pattern.findall(mapping_page)
[ "def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the class of the infobox, given the HTML of the DBpedia infobox_page. The class is in CamelCase (possibly with colon and space), exactly as it appears in the infobox_page.
def get_class(infobox_page): pattern = re.compile('OntologyClass:[-\w: ]+') wiki_class = pattern.findall(infobox_page) if len(wiki_class) == 0: return None else: return wiki_class[0].replace('OntologyClass:', '')
[ "def parse_page_type_get_infobox(\n html: Tag) -> Tuple[PageType, Optional[Dict[str, Tag]]]:\n infoboxes = html.find_all('table', class_='infobox')\n if len(infoboxes) == 1:\n infobox_dict = parse_infobox(infoboxes[0])\n # Check if movie\n image_caption = infobox_dict.get('_image_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return pairs of (infobox, class). The infobox format is lower case with hyphen (e.g. 'aflplayer2'); the class format is as returned by get_class.
def get_infobox_class_pairs(from_cache=True): infobox_urls = [] infobox_class_pairs = [] for i, mapping_url in enumerate(MAPPINGS_URLS): cache_path = HTML_CACHE_PATH_PREFIX + 'main_mapping_en_' + str(i+1) + '.html' if from_cache: mapping_page = open(cache_path, 'r').read() ...
[ "def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n els...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
distribute targets[lo, hi) into nbucket even partitions; the distribution is used by nbucket processes for parallel computation
def dist(targets, lo, hi, nbucket): distribution = [] for _ in range(nbucket): distribution.append([]) for i in range(lo, hi): if 0 <= i and i < len(targets): distribution[i % nbucket].append(targets[i]) return distribution
[ "def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def distribute_discrete(sizes, groups, pow=1.0):\n chunks = np.array(sizes, dtype=np.int64)\n weights = np.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
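A worked example of the round-robin partitioning in dist above (hypothetical inputs): index i goes to bucket i % nbucket, so the buckets differ in size by at most one element:

targets = ['a', 'b', 'c', 'd', 'e', 'f']
print(dist(targets, 0, 6, 2))   # [['a', 'c', 'e'], ['b', 'd', 'f']]
print(dist(targets, 2, 5, 2))   # [['c', 'e'], ['d']]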
run tweet collection on a list of users using one API key, with (apikey, users) as args; the list of users is processed sequentially, establishing a new database connection for each user, committing insertions, and closing the connection when done
def runner(args): apikey, users = args api = collect.mk_api(apikey) for user in users: db_connection = db.mk_connection() collect.collect_user_tweets(api, user, collect.mk_sql_insert_handler(db_connection)) db.close_connection(db_connection)
[ "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns (indent, rest), splitting the line at its leading indentation.
def separeIndent(self,line): p=0 while p<len(line) and line[p] in string.whitespace: p=p+1 rest=line[p:] return line[:p],rest
[ "def get_indent(line):\n indent = indent_regex.match(line)\n if indent is None:\n return ''\n else:\n return indent.group()", "def get_indent(self) -> int:\n return self._indent", "def _evaluate_indent_variation(token_seq):\n up = sum(token.type == tokenize.INDENT for token in token...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
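A standalone sketch of the split performed by separeIndent above (receiver dropped and the helper renamed separate_indent purely for illustration):

import string

def separate_indent(line):
    p = 0
    while p < len(line) and line[p] in string.whitespace:
        p += 1
    return line[:p], line[p:]

print(separate_indent('    return x'))   # ('    ', 'return x')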
Loads the image from disk and returns it with axes in the natural order (depth last). The data format should be numpy.uint8; runtime grows roughly quadratically with numpy.uint16, especially for segmentation.
def load(self, path, shape=(1024, 1024, 35), dtype='uint16'): valid_dtypes = ['uint8', 'uint16'] if dtype not in valid_dtypes: raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes)) im = io.imread(path) im = numpy.rollaxis(im, 0, 3) if im.sha...
[ "def load_depth(path):\n d = imageio.imread(path)\n return d.astype(np.float32)", "def load_images(mraw, h, w, N, bit=16, roll_axis=True):\n\n if int(bit) == 16:\n images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w))\n elif int(bit) == 8:\n images = np.memmap(mraw, dtype=np.u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filters by first convolving the background with a Gaussian filter, then subtracting the obtained image from the original, and finally refiltering with another Gaussian filter whose variance is 10 times smaller. The variance is specified in the utils module.
def filter(self, op=GaussianFilter): if self._verbose > 0: print("Filtering...") # Import from utils specified params. params = get_filtering_params() negative = self.image_raw - op(sigma=params['sigma_bgd']).convolve(self.image_raw) self.image_filtered = op(sigma...
[ "def GaussianBlur():\n # cv2.GaussianBlur(src, ksize, sigmaX, dst=None, sigmaY=None, borderType=None)", "def differenceOfGausssians(image,sigma0, sigma1,window_size, roi, out = None):\n return (vigra.filters.gaussianSmoothing(image,sigma0,window_size=window_size,roi = roi)-vigra.filters.gaussianSmoothing...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DEPRECATED: replaced by detect_and_fit for simplicity and speed. Detects spots with a specified detector (from the spotdetector.py module) and the detection params from the utils module. Spots are identified by their position, i.e. 'x.y.z'.
def _detect_spots(self, detector=LocalMax, **kwargs): if self._verbose > 0: print("Detecting...", end="") spots = detector(**kwargs).locate(self.image_filtered) # Spots are identified by their position: self.spots = [Spot(tuple(s)) for s in spots] if self._verbose >...
[ "def spotmatch(xpix,ypix,expected_x_fp=None,expected_y_fp=None,expected_location=None,verbose=0,match_radius_pixels=70,fvc2fp=None) :\n\n if shutil.which(\"match_positions\") is None :\n raise RuntimeError(\"match_positions is not in PATH. You need to install spotmatch first. It's in https://desi.lbl.gov/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DEPRECATED; jump to the next paragraph. This method goes through all the detected spots and fits a specified spot_model separately to each of them. If a model cannot be safely fit to a spot, then the spot is discarded and deleted from the spots list. Spot_models are built in the fitters module. Extract_cube com...
def fit_spots(self, spot_model=Mixture, kind='individual'): model = spot_model() # print(model) # if model.kind == 'individual': # # loop = self.spots # # # to_delete = [] # if self._verbose > 0: # loop = tqdm.tqdm(loop, desc=...
[ "def _detect_spots(self, detector=LocalMax, **kwargs):\n if self._verbose > 0:\n print(\"Detecting...\", end=\"\")\n\n spots = detector(**kwargs).locate(self.image_filtered)\n\n # Spots are identified by their position:\n self.spots = [Spot(tuple(s)) for s in spots]\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method is intended to segment the nuclei in the DAPI image on Mask images (not FISH). However basic, it seems to give a rather good approximation. The workflow is MIP > local grad > Otsu thresholding > Connected components labelling > Filtering components based on their size (using either a hand threshold or KMeans t...
def segment(self, sg=NucleiSegmenter()): # mask_path = self.name.replace('w1', 'w3').replace('561', '405') # cell_mask = io.imread(mask_path) # self.mask = numpy.swapaxes(cell_mask, 0, 2) with warnings.catch_warnings(): warnings.simplefilter('ignore') if self._ver...
[ "def segment_nuclei3D_5(instack, sigma1=3, sigma_dog_small=5, sigma_dog_big=40, seed_window=(70,100,100),\n erosion_length=5, dilation_length=10, sensitivity=0.5, size_min=1e4, \n size_max=5e5, circularity_min=0.5, display=False):\n\n\n def smart_dilate(stack, labelmas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform sensitivity analysis (via backpropagation; Simonyan et al. 2014) to determine the relevance of each image pixel for the classification decision. Return a relevance heatmap over the input image.
def sensitivity_analysis(model, image_tensor, device, postprocess='abs'): if postprocess not in [None, 'abs', 'square']: raise ValueError("postprocess must be None, 'abs' or 'square'") # Forward pass. X = torch.from_numpy(image_tensor) # convert numpy or list to tensor X.unsqueeze_(0...
[ "def frequency_tuned_saliency(img_src):\n image = cv2.imread(img_src)\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)\n #mean of each channel\n means = []\n for c in range(image.shape[2]):\n means.append(image[:, :, c].mean())\n means = np.asarray(means)\n\n image = cv2.medianBlur(ima...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In this function, the server receives data from the front end, calls the preprocessing program, and, after constructing the model, returns the plan generated by our model back to the front end.
def receiveData(): preference = request.get_json() program = preference.pop('program') enroll_yr = preference.pop('enroll_yr') enroll_sem = preference.pop('enroll_sem') spec = 0 if 'spec' in preference: spec = int(preference['spec']) preference.pop('spec') program_...
[ "def main(**kwargs):\n \n # process the data using Preprocessing class\n _logger.debug(\"[MainPreprocessing] initiated...\")\n processed_data = Preprocessing()\n _logger.debug(\"[MainPreprocessing] completed successfully.\")\n\n # build the optimisation model, where objectives and constraints are ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function obtains data from the table in our GUI when updating it. After receiving the table, the MiniZinc model is called to replan the courses.
def returnTheTable(): preference = request.get_json() # Obtain the list containing replaced courses and the to-be-updated plan replaced = preference.pop('replaced') oldPlan = dict() readPlan = open('plan.txt') try: content = str(readPlan.read()) courses = content.split(' ...
[ "def refresh_course(self):\r\n self.course = modulestore().get_course(self.course.id)", "def reload_course(self):\r\n self.course = self.store.get_course(self.course.id)", "def update_gui(self):\n if self.cycle_reading_flag:\n self.fill_table_data_from_stm_data()\n pass", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs all the tests in the experiment with the given file and number of samples
def run_tests(file, samples): # Get the script dir, name and check if the file given exists test_dir = os.path.dirname(os.path.realpath(__file__)) script_name = os.path.basename(__file__) if not os.path.isfile(os.path.join(test_dir, file)): sys.stderr.write('{0}: file \'{1}\' not found\n'.format...
[ "def main():\n for filename in sys.argv[1:]:\n test(filename)", "def test_example(self):\n\n\t\tfrom citrine_challenge import BaseSampler\n\n\t\tinput_files = self._input_files\n\n\t\toutput_files = [\n\t\t\tos.path.join(self._output_prefix, \"test_example/some_output_file.txt\"),\n\t\t]\n\n\t\tn_result...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
R""" equality comparison between this and another Classifier; simply checks if A - B == 0
def __eq__(self,other): return (self - other == 0.)
[ "def __eq__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x==other.x and self.y==other.y and self.z==other.z\r\n else:\r\n return 0", "def __eq__(self, other):\n return (self.SSE == other.SSE) and self.equals(other)", "def __eq__(self, other):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
R""" difference between this and another Graph: just the norm between graph-wide Graphlet Degree Vectors
def __sub__(self,other): return np.linalg.norm(self.ngdv-other.ngdv)
[ "def dist(self, G1, G2):\n\n adj1 = nx.to_numpy_array(G1)\n adj2 = nx.to_numpy_array(G2)\n dist = np.linalg.norm((adj1 - adj2))\n self.results['dist'] = dist\n self.results['adjacency_matrices'] = adj1, adj2\n return dist", "def gradient_other(self):\n # This is just t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
R""" builds the GraphLibrary from neighborhoods
def build(self,neighborhoods,k=5): g_idx = np.zeros(len(neighborhoods),dtype=int) for i, nn in enumerate(neighborhoods): G = Graph(nn,k) g_idx[i] = self.encounter(G) for i, sig in enumerate(self.sigs): if sig not in self.lookup: self.lookup[...
[ "def build_computational_graph():\n pass", "def _construct_graph(self):\n raise NotImplementedError", "def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extended_euclidean_algorithm(a, b) The result is the greatest common divisor of a and b, together with the Bezout coefficients x and y satisfying a*x + b*y == gcd(a, b).
def extended_euclidean_algorithm(a, b): if a == 0: return b, 0, 1 else: g, y, x = extended_euclidean_algorithm(b % a, a) return g, x - (b // a) * y, y
[ "def extended_euclidean_algorithm(a, b):\n r0, r1 = a, b\n x0, x1 = 1, 0\n y0, y1 = 0, 1\n while r1:\n # q_{i} = r_{i-1} // r_{i}\n q = r0 // r1\n # r_{i+1} = r_{i-1} - q_{i} * r_{i}\n r0, r1 = r1, r0 - q * r1\n # x_{i+1} = x_{i-1} - q_{i} * x_{i}\n x0, x1 = x1,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
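A quick check of extended_euclidean_algorithm above: it returns (g, x, y) with g == gcd(a, b) and the Bezout identity a*x + b*y == g:

g, x, y = extended_euclidean_algorithm(240, 46)
print(g, x, y)                  # 2 -9 47
assert 240 * x + 46 * y == g    # 240*(-9) + 46*47 == 2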
modular_inverse(e, z) Calculates the modular multiplicative inverse of e modulo z.
def modular_inverse(e, z): g, x, y = extended_euclidean_algorithm(e, z) if g != 1: raise Exception('Modular inverse does not exist') else: return x % z
[ "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def modular_inverse(self, c1, c2, N):\n i = gmpy2.invert(c2, N)\n mx = pow(c1, self.a, N)\n my = po...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
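A small worked example for modular_inverse above, with values chosen so that gcd(e, z) == 1 and the inverse exists:

d = modular_inverse(7, 40)
print(d)                  # 23, since 7 * 23 == 161 == 4 * 40 + 1
assert (7 * d) % 40 == 1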
An account alias associated with a customer's account.
def account_alias(self) -> pulumi.Input[str]: return pulumi.get(self, "account_alias")
[ "def account_alias(self):\n return self._account_alias", "def account_alias(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias\")", "def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An AWS Support App resource that creates, updates, reads, and deletes a customer's account alias.
def __init__(__self__, resource_name: str, args: AccountAliasArgs, opts: Optional[pulumi.ResourceOptions] = None): ...
[ "def update_app_alias(self, api_key, device_name, app_raw_name, app_alias):\n raise NotImplementedError", "def account_alias_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias_resource_id\")", "def create_account_alias(self, alias):\r\n params = {'AccountAlia...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing AccountAlias resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'AccountAlias': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = AccountAliasArgs.__new__(AccountAliasArgs) __props__.__dict__["accou...
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = AccountArgs.__new__(AccountArgs)\n\n __props__.__dict__[\"account...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An account alias associated with a customer's account.
def account_alias(self) -> pulumi.Output[str]: return pulumi.get(self, "account_alias")
[ "def account_alias(self):\n return self._account_alias", "def account_alias(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_alias\")", "def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unique identifier representing an alias tied to an account
def account_alias_resource_id(self) -> pulumi.Output[str]: return pulumi.get(self, "account_alias_resource_id")
[ "def account_alias(self):\n return self._account_alias", "def account_alias(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias\")", "def account_alias(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_alias\")", "def alias_canonical_id(self) -> str:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Empty entry point to the Lambda function invoked from the edge.
def lambda_handler(event, context): return
[ "def main():\n lambda_handler(\"event\", \"context\")", "def lambda_handler(event, context):\n return ackermann(event[\"m\"], event[\"n\"])", "def test_lambda_support_no_parameters_no_body(self):\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(\"() -> {};\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method updates the image data. This currently encodes the numpy array to jpg but can be modified to support other encodings. frame: numpy array containing the image data of the next frame in the project stream.
def set_frame_data(self, frame): ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution)) if not ret: raise Exception('Failed to set frame data') self.frame = jpeg
[ "def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame", "def UpdateFrames(image=None):\n pass", "def send_frame(self):\n frame = self.frame_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the DeepLens inference loop frame by frame
def infinite_infer_run(): try: # This cat-dog model is implemented as binary classifier, since the number # of labels is small, create a dictionary that converts the machine # labels to human readable labels. model_type = 'classification' output_map = {0: 'dog', 1: 'cat'} ...
[ "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def run_inference(runtime, net_id, images, labels, input_binding_info, output_binding_info):\n output_tensors = ann.make_output_tensors([output_binding_info])\n for idx, im in enumerate(images):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Empty entry point to the Lambda function invoked from the edge.
def lambda_handler(event, context): return
[ "def main():\n lambda_handler(\"event\", \"context\")", "def lambda_handler(event, context):\n return ackermann(event[\"m\"], event[\"n\"])", "def test_lambda_support_no_parameters_no_body(self):\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(\"() -> {};\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method updates the image data. This currently encodes the numpy array to jpg but can be modified to support other encodings. frame: numpy array containing the image data of the next frame in the project stream.
def set_frame_data(self, frame): ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution)) if not ret: raise Exception('Failed to set frame data') self.frame = jpeg
[ "def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame", "def UpdateFrames(image=None):\n pass", "def send_frame(self):\n frame = self.frame_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the fav drinks for a given user id.
def get_fav_drinks(self, user_id): assert type(user_id) == str return next((fd.get('drink_id') for fd in self.favorite_drinks if fd.get('user_id')==user_id), None)
[ "def get_favorites(self, user_id=None):\n if not user_id:\n user_id = self.user_id\n\n favorite_decks = self.data_source.get_favorites(user_id)\n\n return favorite_decks", "def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Incrementally generates fav drink ids.
def __generate_id(self): ids = [int(fd.get('id')) for fd in self.favorite_drinks] return str(max(ids)+1)
[ "def update_next_id(cls):\n cls.next_id += 1", "def incr_circuit_fav_count(self, circuit_id):\n key = ':'.join(\n [CIRCUIT_NMBR_FAVS_1, \n str(circuit_id), \n CIRCUIT_NMBR_FAVS_2]\n ) \n self.RS.incr(key)", "def increment_ID(self):\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a list of drinks to the user's favorite_tr_drinks. At least one drink needs to exist in the drinks object.
def add_fav_drinks(self, user_id, drinks): assert type(user_id) == str assert type(drinks) == list fav_drinks = self.get_fav_drinks(user_id) user_check = self.users.get_user_name(user_id) drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) ...
[ "def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a single existing drink id to a user's fav_drinks.
def add_fav_drink(self, user_id, drink_id): assert type(user_id) == str assert type(drink_id) == str existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True existing_user = False if self.users.get_user_name(user_id) is None else True if not existing...
[ "def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a single drink id from a given user's favorite_tr_drinks
def delete_fav_drink(self, user_id, drink_id): assert type(user_id) == str assert type(drink_id) == str drinks = self.get_fav_drinks(user_id) user_check = self.users.get_user_name(user_id) if drinks is not None and drink_id in drinks: drinks.remove(drink_id) e...
[ "def favourite_delete(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Remove the thrower from the logged in thrower's favourites and return\n\t\t#\tthe result\n\t\treturn ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and displays a simple frame containing the RichTextPanel.
def showEditorWindow(parent, title, allowEditting = True): frame = wx.Frame(parent, -1, title, size=(630, 320), style = wx.DEFAULT_FRAME_STYLE) panel = RichTextPanel(allowEditting, frame, -1) #frame.Fit() #frame.SetMinSize(frame.GetSize()) frame.Show() return panel
[ "def create(self, parent):\n\n self.widget = wx.html.HtmlWindow(parent)", "def _gen_new_frame(self):\n\t\tif self.frame:\n\t\t\tself.frame.destroy()\n\t\tself.frame = tk.Frame(master=self.window, bg='#455A64')\n\t\tself.frame.pack(expand=True, fill='both')", "def render(self) -> None:\n HtmlGui(na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicates that we are beginning a new frame for the GIF. A new Figure object is created, using specifications provided to the Gif's constructor. Note that you are constrained to make one frame at a time: for every start_frame, there must be an end_frame without another start_frame in between.
def start_frame(self): # Check whether we're supposed to make a frame on this iteration: if self.frame_count % self.stride != 0: return # Check whether we're already making a frame. if self.in_scope: print("The Gif object for {} has encountered 'start_frame' tw...
[ "def create_frame_start(self):\n self.frame_start = self.create_frame(self.root)", "def _init_frame(self : \"animation\",\n init_frame : \"matplotlib.figure.Figure\",\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\"\n ):\n self._cframe = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pushes game state onto history.
def _push_history(self): self._history.append(self._state)
[ "def __update_history(self) -> None:\n self.__history.append(self.__bg_state)\n self.__status = self.__history[-1]", "def push_state(self, clock=None):\n if clock is None:\n clock = self.lamport_clock\n self.update_title(\"French75 - Unsaved Changes\")\n self.undo_sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pops and loads game state from history.
def _pop_history(self): current_state = self._state try: self._load_state(self._history.pop()) return current_state except IndexError: return None
[ "def get_new_gamestate(self):", "def game_state():\n data = load()\n return types.Game(data)", "def load_game_state(self, load_path : str) -> None:\n pass", "def new_game(self):\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()", "def rese...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same thing as Array.__getitem__, but returns None if coordinates are not within array dimensions.
def _get_none(self, x, y): try: return self[x, y] except ArrayError: return None
[ "def __getitem__(self, idx_tuple):\n assert len(idx_tuple) == self.num_dims(), \"Invalid number of array indices\"\n idx = self._compute_index(idx_tuple)\n assert idx is not None, \"Array indices out-of-range\"\n return self._elements[idx]", "def __getitem__(self, pos):\n if isi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets information about the surrounding locations for a specified coordinate. Returns a tuple of the locations clockwise starting from the top.
def _get_surrounding(self, x, y): coords = ( (x, y - 1), (x + 1, y), (x, y + 1), (x - 1, y), ) return filter(lambda i: bool(i[0]), [ (self._get_none(a, b), (a, b)) for a, b in coords ])
[ "def get_all_surrounding_positions(position):\n i, j = position\n return (\n (i-1, j-1), # lower left\n (i-1, j), # left\n (i-1, j+1), # upper left\n (i, j+1), # up\n (i+1, j+1), # upper right\n (i+1, j), # right\n (i+1, j-1), # lower right\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively traverses adjacent locations of the same color to find all locations which are members of the same group.
def _get_group(self, x, y, traversed): loc = self[x, y] # Get surrounding locations which have the same color and whose # coordinates have not already been traversed locations = [ (p, (a, b)) for p, (a, b) in self._get_surrounding(x, y) if p is loc an...
[ "def grasps_within_pile(color_mask):\n hue_counts, hue_pixels = get_hsv_hist(color_mask)\n\n individual_masks = []\n\n #color to binary\n focus_mask = color_to_binary(color_mask)\n\n #segment by hsv\n for block_color in hue_counts.keys():\n #same threshold values for number of objects\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kills a group of black or white pieces and returns its size for scoring.
def _kill_group(self, x, y): if self[x, y] not in self.TURNS: raise BoardError('Can only kill black or white group') group = self.get_group(x, y) score = len(group) for x1, y1 in group: self[x1, y1] = self.EMPTY return score
[ "def remove(self, pieces):\n for piece in pieces:\n self.board[piece.row][piece.col] = None\n if piece.get_player() is Player.white:\n self.num_white_pieces -= 1\n if piece.is_king():\n self.num_white_kings -= 1\n\n elif piece....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively traverses adjacent locations of the same color to find all surrounding liberties for the group at the given coordinates.
def _get_liberties(self, x, y, traversed): loc = self[x, y] if loc is self.EMPTY: # Return coords of empty location (this counts as a liberty) return set([(x, y)]) else: # Get surrounding locations which are empty or have the same color # and whos...
[ "def _get_group(self, x, y, traversed):\n loc = self[x, y]\n\n # Get surrounding locations which have the same color and whose\n # coordinates have not already been traversed\n locations = [\n (p, (a, b))\n for p, (a, b) in self._get_surrounding(x, y)\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the number of liberties surrounding the group at the given coordinates.
def count_liberties(self, x, y): return len(self.get_liberties(x, y))
[ "def __get_contour_num(self,coord_tuple):\r\n boundRect = self.boundRect\r\n x,y = coord_tuple\r\n res = -1\r\n for i in range(len(self.top_contours)):\r\n if ((self.top_contours[i] == True) and (x >= boundRect[i][0]) and (x <= boundRect[i][0]+boundRect[i][2]) and\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes requests to retrieve all resources for `res_ids`, yielding each batch.
def gen_resources_for_ids( resource: Callable, res_ids: List[str], **list_params ) -> Generator[List, None, None]: print("Generating resources for ids.") total = len(res_ids) res_counter = 0 if "maxResults" not in list_params.keys(): list_params["maxResults"] = DEFAULT_MAX_RESULTS m...
[ "async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates `commentThreads` for the `videos`, yielding on every video.
def gen_comment_threads_for_videos( self, videos: List ) -> Generator[List, None, None]: print("Requesting comment threads for videos.") for video in videos: threads = self.get_comment_threads_for_video(video["id"]) yield threads return None
[ "def get_comment_threads(vid_id, data=None, next_page_tok=None):\n\n\tdef comments_list(parent_id):\n\t\t\"\"\"Get the comments from a particular thread\"\"\"\n\t\tapi_results = youtube.comments().list(\n\t\t\tpart \t\t= \"snippet\",\n\t\t\tparentId \t= parent_id,\n\t\t\ttextFormat \t= \"plainText\",\n\t\t).execute...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to take the output of the ArcGIS catchment delineation polygon shapefile and the catchment sites csv and return a shapefile with appropriately delineated polygons.
def agg_catch(catch_del_shp, catch_sites_csv, catch_sites_col=['GRIDCODE', 'SITE'], catch_col='GRIDCODE'): ## Catchment areas shp catch = read_file(catch_del_shp)[[catch_col, 'geometry']] ## dissolve the polygon catch3 = catch.dissolve(catch_col) ## Determine upstream catchments catch_df, sin...
[ "def extract_catchments(self, \n source, \n destination, \n flowlinefile, \n verbose = True,\n ):\n\n # make a list of the comids\n\n comids = self.get_comids(flowlinefile)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Catchment delineation using the REC streams and catchments. sites_shp: points shapefile of the sites along the streams. sites_col: the column name of the site numbers in sites_shp. catch_output: the output polygon shapefile path of the catchment delineation.
def rec_catch_del(sites_shp, sites_col='site', catch_output=None): ### Parameters server = 'SQL2012PROD05' db = 'GIS' streams_table = 'MFE_NZTM_REC' streams_cols = ['NZREACH', 'NZFNODE', 'NZTNODE'] catch_table = 'MFE_NZTM_RECWATERSHEDCANTERBURY' catch_cols = ['NZREACH'] ### Modificatio...
[ "def agg_catch(catch_del_shp, catch_sites_csv, catch_sites_col=['GRIDCODE', 'SITE'], catch_col='GRIDCODE'):\n\n ## Catchment areas shp\n catch = read_file(catch_del_shp)[[catch_col, 'geometry']]\n\n ## dissolve the polygon\n catch3 = catch.dissolve(catch_col)\n\n ## Determine upstream catchments\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return prob(chisq >= chi, with df degrees of freedom). df must be even.
def chi2P(chi, df): assert df & 1 == 0 # If chi is very large, exp(-m) will underflow to 0. m = chi / 2.0 sum = term = exp(-m) for i in range(1, df//2): term *= m / i sum += term # With small chi and large df, accumulated # roundoff error, plus error in # the platform exp...
[ "def chisqprob(chisq, df):\n BIG = 20.0\n\n def ex(x):\n BIG = 20.0\n if x < -BIG:\n return 0.0\n else:\n return math.exp(x)\n\n if chisq <= 0 or df < 1:\n return 1.0\n a = 0.5 * chisq\n if df % 2 == 0:\n even = 1\n else:\n even = 0\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
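For df == 2 the series in chi2P collapses to its first term, so (assuming the elided tail returns the accumulated sum) chi2P(chi, 2) == exp(-chi/2); a hand-checkable example:

from math import exp
p = chi2P(3.84, 2)
print(p)                            # ~0.1466
assert abs(p - exp(-1.92)) < 1e-12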
Make a pair of functions flatten(tree) -> x, unflatten(x) -> tree
def flatten_and_unflatten(input_tree) -> Tuple[Callable, Callable]: tree_structure = tree_util.tree_structure(input_tree) leaf_shapes = [get_shape(leaf) for leaf in tree_util.tree_leaves(input_tree)] def flatten(tree): leaves = tree_util.tree_leaves(tree) flattened_leaves = [reshape(leaf,...
[ "def unflatten_tree(tree, xs):\n tree = _replace_nones(tree)\n\n return jax.tree_util.tree_unflatten(jax.tree_util.tree_structure(tree), xs)", "def unflatten(self, x):", "def flatten(self, x):", "def flatten(tree):\n if is_leaf(tree):\n return [tree]\n else:\n return sum([flatten(b) for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
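The document above is truncated; a minimal self-contained sketch of the flatten/unflatten pairing using jax.tree_util directly (this shows the round-trip idea only, not the original's reshaping of leaves into one flat vector):

import jax.numpy as jnp
from jax import tree_util

tree = {'b': jnp.zeros(3), 'w': jnp.ones((2, 3))}
leaves, structure = tree_util.tree_flatten(tree)        # leaves in a deterministic order
restored = tree_util.tree_unflatten(structure, leaves)  # rebuild the original pytree
assert tree_util.tree_structure(restored) == structure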
load all training data into a dictionary stored in order of X, u, L, W, k
def load_all(): training_data = dict() for i in range(7): training_data[i+1] = load_data(i+1) return training_data
[ "def training_data(self):\n if self._training_data is None:\n self._load_training_data()\n if self._swapped_training_data is None:\n self._swapped_training_data = {}\n for key, value in self._training_data.items():\n self._swapped_training_data[key] = va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compile the training set corresponding to experiments listed in ind_list
def make_training_set(ind_list, training_data): exp = training_data[ind_list[0]] X_train = exp[0] u_train = exp[1] for i in ind_list[1:]: exp = training_data[i] X_train = np.append(X_train, exp[0], axis=0) u_train = np.append(u_train, exp[1], axis=0) return X_train...
[ "def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n #Train with each vector one by one\n if iter_no % 20 == 0:\n print(iter_no)\n for input_vect in input_vects:\n self._sess.run(self._trai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lowers a list of TealBlocks into a list of TealComponents.
def flattenBlocks(blocks: List[TealBlock]) -> List[TealComponent]: codeblocks = [] references: DefaultDict[int, int] = defaultdict(int) indexToLabel = lambda index: "l{}".format(index) for i, block in enumerate(blocks): code = list(block.ops) codeblocks.append(code) if block.is...
[ "def breakFuelComponentsIntoIndividuals(self):\n fuels = self.getChildrenWithFlags(Flags.FUEL)\n if len(fuels) != 1:\n runLog.error(\n \"This block contains {0} fuel components: {1}\".format(len(fuels), fuels)\n )\n raise RuntimeError(\n \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the object_id is found in the db for the last 5 minutes, it retrieves this job's information and skips processing. If the object_id cannot be found in the db for the last 5 minutes, it saves it and sends it to a queue to be processed.
def process(object_id: str) -> Job: jobs = db.Jobs().get_by_object_id(object_id) job_processed_in_last_five_minutes = list( filter( lambda x: ( datetime.datetime.utcnow() - x.timestamp < datetime.timedelta(minutes=5) ), jobs, ) ) if job...
[ "def scheduleUpload(objectId, backend):", "def _refresh(self):\n \n if len(self.__data) or self.__killed:\n return\n \n if self.__id is None:\n message = Message.query(self.__query_options(),\n self.__collection.full_name,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detects the immersion level based on the given features.
def detect(self, features): pass # TODO
[ "def detect(self, detect_img):\n features = self.classifier.detectMultiScale(detect_img,1.3,5)\n self.features = features\n self.features_detected = True", "def get_feature(glcm, featurelist=['contrast']):\n measure_list = dict(max_prob=0, contrast=0, dissimilarity=0, homogeneity=0, ASM=0,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Optimizes the EmotionsDetector model, trying to find the SVM parameters that would yield better results.
def optimize(self, args): ############################ # Get the data ############################ # Read the CSV file ignoring the header and the first column (which # contains the file name of the image used for extracting the data in # a row) try: ...
[ "def _optimize_svm_main(self, kernel):\n def svm_crossval(C, gamma):\n \"\"\"Wrapper of SVC cross validation.\n Notice how we transform between regular and log scale. While this\n is not technically necessary, it greatly improves the performance\n of the optimizer....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compile and install the fake library used for testing
def fake(ctx, clean=False): work_dir = join(PROJ_ROOT, "func", "dynlink") build_dir = join(PROJ_ROOT, "build", "libfake") clean_dir(build_dir, clean) build_cmd = [ "cmake", "-GNinja", "-DFAASM_BUILD_SHARED=ON", "-DFAASM_BUILD_TYPE=wasm", "-DCMAKE_TOOLCHAIN_FILE=...
[ "def test_project_with_dependencies(self):\n self.make_project()\n # 'test_library.zip' is not currently compiled for diorite.\n self.project.app_platforms = \"aplite,basalt,chalk\"\n self.project.save()\n tempdir = tempfile.mkdtemp()\n try:\n # Extract a premade...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create branches in the DAG.
def create_branches(branches, pcoll, provider_options): logger.info('Branch count: %i' % len(branches)) pcoll_tuple = () for branch in branches: logger.info('Adding branch') output = create_graph(branch, pcoll, provider_options) pcoll_tuple = pcoll_tuple + (output,) logger.info('Transform: MergeB...
[ "def step_create_branch_list(self, project_info):\n del project_info\n for branch in self.branches_data:\n branch_data = {}\n branch_data['commit_hash'] = branch['commit_hash']\n branch_data['branch_name'] = branch['name']\n branch_data['merge_target'] = bra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a pymongo database and a regex object (or string) for geo_id, return (pubmed_id, geo_id)
def getPubmedIds(db, geo_id, limit=0): pm_tups = [] for ds in db.datasets.find({ '$or' : [{'reference_series':geo_id} , {"geo_id" :geo_id }]}).limit(limit): if 'pubmed_id' in ds: pm_tups.append((ds['pubmed_id'], ds['geo_id'])) pm_tups.append((ds['pubmed_id'], ds['reference_series...
[ "def map_mongo_to_sql_common(doc) -> Dict[str, Any]:\n return {\n MLWH_MONGODB_ID: str(doc[FIELD_MONGODB_ID]), # hexadecimal string representation of BSON ObjectId. Do ObjectId(hex_string) to turn it back\n MLWH_ROOT_SAMPLE_ID: doc[FIELD_ROOT_SAMPLE_ID],\n MLWH_RNA_ID: doc[FIELD_RNA_ID],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a pubmed id, return a list of words from the given fields
def getWords(pubmed_id, fields=["MeshHeading" , "AbstractText", "ArticleTitle"]): def findText(anode): if anode.nodeType == anode.TEXT_NODE: return anode.data elif anode.hasChildNodes(): return ' '.join(map(findText, anode.childNodes)) else: return '' ...
[ "def qwords(id):", "def words(self, fields, normalizer_class):\n return sorted(set(itertools.chain.from_iterable(\n bib.raw_data(fields, normalizer_class)\n for bib in self.documents\n )))", "def get_page_words(parsed_hocr_page, pageid):\n page_words = []\n page_height ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a mongo db, geo_id, and a list of words, insert into the word2geo collection
def insertWords(db, geo_id, words): def f(word): return {'geo_id': geo_id, 'word': word} try: db.word2geo.insert(list(map(f, words))) except: print("error in " + geo_id) print(list(map(f, words)))
[ "async def _insert_words(self, dict_words: List[DictWordModel]) -> NoReturn:\n docs = [word.dict() for word in dict_words]\n is_inserted = await self._db_client.try_insert_many(self._db_name, self._db_collection_name, docs)\n if not is_inserted:\n raise DBError('Failed to save many w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a dictionary recording the min and max indices (indicating the position in a list) of documents for each review;
def build_indices(review_ids): review_indices = {} # Load qrel_abs_train txt file clef_data = pd.read_csv(config.TRAIN_QREL_LOCATION, sep="\s+", names=['review_id', 'q0', 'pmid', 'included']) # Get index of documents for each review for review_id in review_ids: index = clef_data.index[clef_data['review...
[ "def build_dict(sentences, max_words=50000):\n word_count = Counter()\n for sent in sentences:\n for w in sent.split(' '):\n word_count[w] += 1\n max_words = int(len(word_count.keys()) * 0.95)\n word_count['unk'] = MAX_NUM\n ls = word_count.most_common(max_words)\n logging.info('...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Redirect index to students page
def index() -> str: return redirect('/students')
[ "def index():\n return redirect(url_for('grade'))", "def index():\n return redirect(url_for('staff.dashboard'))", "def home_page():\n\n return redirect(\"/users\")", "def index_redirect():\n redirect('/ardublockly/index.html')", "def static_docs_index():\n redirect('/docs/Home/index.html')", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the total number of cards that given user owns of this card
def get_user_ownership_count( self, user: get_user_model(), prefetched: bool = False ) -> int: if prefetched: return sum( ownership.count for card_printing in self.printings.all() for localisation in card_printing.localisations.all() ...
[ "def get_user_ownership_count(\n self, user: get_user_model(), prefetched: bool = False\n ) -> int:\n if prefetched:\n return sum(\n ownership.count\n for localisation in self.localisations.all()\n for ownership in localisation.ownerships.all(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets whether this card has another half (flip, split, transform etc)
def has_other_half(self) -> bool: return self.layout in ( "flip", "split", "transform", "meld", "aftermath", "adventure", "modal_dfc", )
[ "def can_double(self):\n return len(self.cards) == 2", "def can_split(self) -> bool:\n if len(self.cards) == 2 and self.cards[0].value == self.cards[1].value:\n return True\n else:\n return False", "def half_check(self):\n k = len(self.matrix) * len(self.matrix...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the keyrune code that should be used for this printing. In 99% of all cases, this will return the same value as printing.set.keyrune_code, but for Guild Kit printings, the guild symbol should be used instead.
def get_set_keyrune_code(self) -> str: if self.set.code in ("GK1", "GK2") and len(self.face_printings.all()) == 1: first_face = self.face_printings.all()[0] if first_face.watermark: return first_face.watermark return self.set.keyrune_code.lower()
[ "def getCode1Letter(self):\n dataDict = self.__dict__\n # NB must be done by direct access\n result = dataDict['code1Letter']\n return result", "def get_key_code(*args):\n return _ida_kernwin.get_key_code(*args)", "def get_character_code(self) -> int:\n return self._character_code", "def f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the total number of cards that given user owns of this printing
def get_user_ownership_count( self, user: get_user_model(), prefetched: bool = False ) -> int: if prefetched: return sum( ownership.count for localisation in self.localisations.all() for ownership in localisation.ownerships.all() ...
[ "def get_user_ownership_count(\n self, user: get_user_model(), prefetched: bool = False\n ) -> int:\n if prefetched:\n return sum(\n ownership.count\n for card_printing in self.printings.all()\n for localisation in card_printing.localisations....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a change of the number of cards a user owns (can add or subtract cards)
def apply_user_change(self, change_count: int, user: get_user_model()) -> bool: if user is None or change_count == 0: return False try: existing_card = UserOwnedCard.objects.get( card_localisation=self, owner=user ) if change_count < 0 and...
[ "def update_player_turn_count(scorecard: int) -> int:\n scorecard -= 1\n return scorecard", "def adjust_for_ace(self):\n self.values[\"Ace\"] = 1\n self.value = 0\n for card in self.cards:\n self.value += self.values[card.rank]", "def final_checking_cards(self):\n ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the most fitting image path for this localisation (the first face if there are multiple).
def get_image_path(self) -> Optional[str]: try: return self.localised_faces.all()[0].get_image_path() except IndexError: logging.exception("Failed to find an image for %s", self) return None
[ "def get_best_face(self, image):\n\t\ttry:\n\t\t\treturn max(self.get_faces(image),\n\t\t\t key = (lambda f: f[1]))\n\t\texcept ValueError:\n\t\t\treturn None", "def best_path(self):\n\n trellis = self.timesteps\n observations = self.observations\n states = self.states\n time...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the date and meal for a menu, both from CLI and function calls. This method will only return a non-None value if that's what the user specified, since more information than is available at this point is necessary to make an automatic decision (namely, the menu date), in which case it returns None so that whichever f...
def _parse_args(input_date, input_meal): parser = ArgumentParser() parser.add_argument('-d', '--date', type=str) parser.add_argument('-m', '--meal', type=str) args = parser.parse_args() # Allows getting the args from either CLI or as the function parameters query_date = args.date or input_date ...
[ "def parse_menu(menufile_text):\n def make_dict(matches):\n data_dict = {}\n for key, value in matches:\n data_dict[key] = value\n return data_dict\n\n name_match = re.search(\"\\?package\\((.*?)\\)\", menufile_text)\n property_matches = re.findall(\"(\\w+)=\\\"(.*?)\\\"\", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a network from config file
def build_network(config): network_cfg = config['network'] network_name = network_cfg['name'] network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:] args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)] try: model = e...
[ "def load_network_from_config(self, name):\n if name not in self._config[\"networks\"]:\n raise ValueError(\"Network %s is not in config file\" %(name))\n\n\n path_to_network_params = self._config[\"networks\"][name][\"path_to_network_params\"]\n path_to_network_params = utils.conver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For the given installer conditions, verify the dependencies for every single one of the conditions that are in some way referenced in specs or source.
def test_verify_all_dependencies(self): for condition in self.all_references(): result = self.verify_dependencies(condition) if result: self.ill_defined[condition] = result else: self.well_defined.add(condition) return self.ill_defin...
[ "def test_verify_dependencies(self, cond_id, conditions):\n\n if not cond_id in conditions.get_keys():\n return 1\n else:\n result = self.verify_dependencies(cond_id)\n return result", "def test_conditional_dependencies_install(self):\n self.image_create(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that the given condition id is defined, and that its dependencies and their transitive dependencies are all defined and valid.
def test_verify_dependencies(self, cond_id, conditions): if not cond_id in conditions.get_keys(): return 1 else: result = self.verify_dependencies(cond_id) return result
[ "def _verify_dependencies(self, cond_id, undefined_paths, current_path):\n\n # Exception for izpack conditions:\n if cond_id in self.conditions.properties[WHITE_LIST]:\n return True\n\n # Short-circuit on well-defined conditions:\n if cond_id in self.well_defined:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the soup for a condition, test that its dependencies are validly defined.
def _verify_dependencies(self, cond_id, undefined_paths, current_path): # Exception for izpack conditions: if cond_id in self.conditions.properties[WHITE_LIST]: return True # Short-circuit on well-defined conditions: if cond_id in self.well_defined: return True ...
[ "def check(condition):", "def dependencies_met(self):\n for dep in self.dependencies.all():\n if not dep.criteria_met():\n return False\n return True", "def testConditionReasons(self):\n \n state = State.from_problem(self.prob)\n\n relevantVars = []\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests if a 'variable' type condition is correctly defined.
def test_variable(self, condition, undefined_paths, current_path): var = str(condition.find('name').text) if not var in self.variables.get_keys() and self.fail_on_undefined_vars: current_path += ((var, 'undefined variable'),) undefined_paths.add(current_path) return F...
[ "def isvar(var):\n return _coconut_tail_call(isinstance, var, (Const, Var))", "def _check_variable_definition(variable_name, variable_attrs):\n\n # Variable name must be type str\n if type(variable_name) != str:\n raise TypeError(\"Invalid variable name: \"+str(variable_name)+\" (must ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unzips a list of tuples, x.
def unzip(self, x): if (len(x)>0): return list(zip(*x)) else: return x, list()
[ "def unzip(pairs):\n return tuple(zip(*pairs))", "def unzip(\n iterable: typing.Iterable[typing.Tuple],\n) -> typing.Iterator[typing.Tuple]:\n return zip(*iterable)", "def Unzip(iterable: Iterable[Tuple[K, T]]) -> Tuple[Iterable[K], Iterable[T]]:\n lefts = []\n rights = []\n\n for left, right in ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
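Usage sketch for unzip above (receiver elided): zip(*x) transposes a non-empty list of tuples, while the else branch returns the empty input alongside a fresh list:

pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
print(list(zip(*pairs)))   # [(1, 2, 3), ('a', 'b', 'c')]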
returns True if `obj` is changed or deleted on the database
def is_changed(obj): revision_field = get_version_fieldname(obj) version = get_revision_of_object(obj) return not obj.__class__.objects.filter(**{obj._meta.pk.name: obj.pk, revision_field: version}).exists()
[ "def object_change(self) -> bool:\n return id(self.fetch_live_object()) != id(self.object_ref)", "def is_live(self, obj):\n most_appropriate_object = get_appropriate_object_from_model(self.model)\n if most_appropriate_object == obj:\n return True\n return False", "def is_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Redefine the root graph for the universe. It omits edges whose labels are in omit_edge_label and also does not store references to the nodes they point at. This is used mostly to get rid of uniquely identifying nodes.
def re_root(self, omit_edge_label: List[str]):
    self.leaves = {node for node in self.nodes
                   if any(edge.label in omit_edge_label for edge in self.edges_to(node))}
    root_nodes = self.nodes - self.leaves
    root_edges = {edge for edge in self.edges if edge.node_to in root_nodes ...
[ "def remove_all_node_labels(self):\n self.node_labels = []", "def reset_graph(self):\n raise NotImplementedError", "def _restoreGraph(self):\n\n # self.tempG = self.g.copy()\n\n if nx.is_directed(self.g):\n self.tempG = nx.DiGraph(self.g)\n else:\n self.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the VNIStatsTableEntrySchema object attributes.
def __init__(self, py_dict=None):
    super(VNIStatsTableEntrySchema, self).__init__()
    self.update_arp = None
    self.query_arp = None
    if py_dict is not None:
        self.get_object_from_py_dict(py_dict)
[ "def _init_schema(self):\n self.sobjects = defaultdict(list)\n self.fields = defaultdict(list)\n self.package_versions = defaultdict(list)\n self.omit_sobjects = set()", "def __init__(self, py_dict=None):\n super(VtepTableEntrySchema, self).__init__()\n self.adapter_ip = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search through a table and return the first [row, column] pair whose value is None.
def find_unassigned_table_cell(table):
    for row in range(len(table)):
        for column in range(len(table[row])):
            if table[row][column] is None:
                return row, column
    # No unassigned cell was found.
    return None, None
[ "def getFirstBlankEntry(self):\n for i in range(self.ROWS):\n for j in range(self.COLUMNS):\n if self.board[i][j] == 0:\n return (i,j)\n return None", "def get_first_selection(table, column_name):\n def replace(entry):\n if pd.isnull(entry):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create mock input block.
def fixture_input_block(): return Mock()
[ "def get_input_mock(inputs=None): # Use this mock if a contest requires interactive input.\n stdin_mock = MagicMock()\n stdin_mock.side_effect = inputs # You can emulate standard input lines by using this list.\n return stdin_mock", "def fixture_output_block():\n return Mock()", "def m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create mock output block.
def fixture_output_block(): return Mock()
[ "def test_output_results(self, mock_template):\n mock_template.return_value = 'Output: {{ data.foo }}'\n result = shell.format_output(\"127.0.0.1\", {'foo': 1})\n self.assertEqual(result, \"Output: 1\")", "def fixture_input_block():\n return Mock()", "def test_block_default_output(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the hexadecimal value of a user-entered base-10 integer. While the user-entered base-10 value is greater than 0, the function stores the remainder of the value modulo 16 in a queue and uses a dictionary to map remainders 10-15 to letters. Outputs the queue representation of the hex value at the end.
def hex_calc(value):
    hex_dict = {  # Dictionary for hex values over 9
        10: "A", 11: "B", 12: "C", 13: "D", 14: "E", 15: "F"
    }
    hex_stack = deque()  # Queue to hold the hexadecimal representation
    while value > 0:
        remainder = value % 16
        ...
[ "def decimal_to_hexadecimal(number):\n if number >= 1 and number <= 10: #if the positive integer is less than 10, its binary form is itself\n print(number)\n else:\n \"\"\"\n divide number by 16, take the reminder and start again until the result is 0\n \"\"\"\n new_number =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the binary value of a user-entered base-10 integer. While the user-entered base-10 value is greater than 0, the function stores the remainder of the value modulo 2 in a queue. Outputs the queue representation of the binary value at the end.
def binary_calc(value):
    binary_stack = deque()  # Queue to hold binary representation
    while value > 0:
        remainder = value % 2
        binary_stack.append(remainder)  # Add binary digit to queue
        value = value // 2
    print("Binary Value: ", end="")
    while binary_stack:
        prin...
[ "def find_bin(n):\n # create and empty list to hold found binary numbers\n found_binary = []\n # initialize the queue\n queue = Queue()\n #begin by enqueueing 1, sine we're starting at 1 and going to n\n queue.enqueue(1)\n for i in range(n):\n found_binary.append(str(queue.dequeue()))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starting point for the program. Asks the user for a positive base-10 decimal integer and calls the binary_calc and hex_calc functions for computation. The program will loop, asking the user for a new number, as long as they do not enter the string "quit".
def setup():
    value = input("Enter a positive decimal integer (\"quit\" to stop): ")
    while value.lower() != "quit":
        binary_calc(int(value))  # Calls converter function on inputted value
        print("\n")
        hex_calc(int(value))  # Calls converter function on inputted value
        value = ...
[ "def main():\n welcome_message()\n continue_program = True\n num_calculations = 0\n # all the calculation options in the program\n calculation_options = [\"addition\", \"subtraction\", \"division\",\n \"multiplication\", \"exponents\", \"circle area\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FPA object setup, whatever that is. The only info from the problem is that it is a silicon detector, and we have a graph of quantum efficiency vs wavelength. Based on the graph, the quantum efficiency at 1.06 um is roughly 50%.
def setup_fpa():
    # It is a silicon detector. Based on the graph, the quantum efficiency
    # at 1.06 um is ~50%.
    fpa = {}
    fpa["quantum_efficiency"] = 0.5
    return fpa
[ "def __init__(self, average_disparity, frame_down_factor, mem_down_factor,\n fovea_shape, frame_shape, values,\n verbose=False, memory_length=1, max_n_foveas=1, **bp_args):\n self.verbose = verbose\n self.use_uncertainty = False\n self.n_past_fovea = 0\n\n# ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts an RGB image to a dataframe where each row corresponds to a single pixel. If sample_count is set, then only a random subset of rows is returned.
def image_to_colorspace(image: np.array, sample_count: int = None) -> DataFrame:
    assert len(image.shape) == 3 and image.shape[2] == 3, "Image must be m x n x 3 dimensional RGB array"
    if sample_count:
        return DataFrame({'R': image[:, :, 0].flatten(),
                          'G': image[:, :, 1].flatten(),
                          'B': image[:, :, 2].flatten(...
[ "def _get_sample_df(self, df, features, r):\n grouped = df.groupby('feature')\n df_sample = pd.DataFrame()\n for feature in features:\n group = grouped.get_group(feature)\n samples = group.sample(n=r)\n df_sample = df_sample.append(samples)\n return df_sa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a colorspace vector back to an RGB image. The colorspace can have extra columns.
def colorspace_to_image(cspace: DataFrame, m: int, n: int) -> np.array:
    assert isinstance(cspace, DataFrame), "Colorspace must be a dataframe"
    assert len(cspace) == m * n, 'Image dimensions must match'
    assert all(np.isin(['R', 'G', 'B'], cspace.columns)), "Colorspace must contain RGB columns"
    ...
[ "def color_convert(image, cspace='RGB'):\r\n if cspace == 'RGB':\r\n return np.copy(image)\r\n elif cspace == 'HSV':\r\n return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\r\n elif cspace == 'LUV':\r\n return cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\r\n elif cspace == 'HLS':\r\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualise a colorspace vector as an interactive 3D figure. The colorspace can have extra columns. By default, RGB channels are clipped to the range [0,1]. Extra arguments can be used to control the appearance of ipyvolume.scatter.
def show_colorspace(cspace: np.array, clip=True, size=0.5, marker='sphere', **kwargs) -> None:
    assert isinstance(cspace, DataFrame), "Colorspace must be a dataframe"
    assert all(np.isin(['R', 'G', 'B'], cspace.columns)), "Colorspace must contain RGB columns"
    fig = ipv.figure()
    if clip:
        ipv.sc...
[ "def visualize_3d(self):\n cols = self.data.columns\n fig = px.scatter_3d(self.data,\n x=cols[0],\n y=cols[1],\n z=cols[2],\n color='label')\n fig.show()", "def visualize3d(input3darray...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that constructed signals are actual proportions.
def test_construct_signals_proportions(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    assert np.all(cbg_df['completely_home_prop'].values <= 1)
    assert np.all(cbg_df['full_time_work_prop'].values <= 1)
    assert np.a...
[ "def check_proportions(self):\r\n\r\n proportions = [\r\n v['proportion'] for k, v in self.composition.items()\r\n ]\r\n\r\n if sum(proportions) < 1.0:\r\n raise ValueError('Sum of proportions between host and pathogen must be 1.0.')\r\n elif sum(proportions) > 1.0:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that aggregation at the county level creates nonzero-valued signals.
def test_aggregate_county(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    df = aggregate(cbg_df, SIGNALS, 'county')
    assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
    x = df[f'{SIGNALS[0]}_se'].values
    assert...
[ "def test_aggregate_nation(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'nation')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that aggregation at the state level creates nonzero-valued signals.
def test_aggregate_state(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    df = aggregate(cbg_df, SIGNALS, 'state')
    assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
    x = df[f'{SIGNALS[0]}_se'].values
    assert n...
[ "def test_aggregate_nation(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'nation')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that aggregation at the nation level creates nonzero-valued signals.
def test_aggregate_nation(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    df = aggregate(cbg_df, SIGNALS, 'nation')
    assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
    x = df[f'{SIGNALS[0]}_se'].values
    assert...
[ "def test_aggregate_state(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'state')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method for reading a nordic file and parsing it to a string array while also checking the integrity of the file (it will give errors when lines are too long). It will also pad the file with empty space if it is too short.
def readNordicFile(f):
    nordics = []
    emsg = "Nordic Read: The following line is too short: {0}\n{1}"
    i = 0
    nordics.append([])
    for line in f:
        if line.strip() == "" or line is None:
            if len(nordics[i]) == 0:
                continue
            i += 1
            nordics.append([])...
[ "def test_file_read_converts_to_arr(self):\n input_arr = consume_input(self.test_input_file)\n self.assertEqual(\n input_arr,\n ['apple', 'c@at', 'orange', '2', 'ban!ana']\n )", "def read_file(fname, ncr):\n print('Reading clues from {}...'.format(fname))\n clues =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the text in the example's document in the given span.
def get_text_span(example, span):
    byte_positions = []
    # `text` is a byte string since `document_plaintext` is also a byte string.
    start = span["plaintext_start_byte"]
    end = span["plaintext_end_byte"]
    text = byte_slice(example["document_plaintext"], start, end)
    for i in range(start, end):
        ...
[ "def get_span_text(self, span: Span) -> str:\n return self._text[span.begin: span.end]", "def substring(self, span):\n begin = span[0] - self.span.begin\n end = span[1] - self.span.begin\n return self.span.text[begin: end]", "def read_text_from_span_id(html, span_id):\n return htm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }