Columns
query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: list, lengths 19 to 20
metadata: dict
Computes a loss from a `model_output` (the parameters of a given probability distribution for every ground truth value in `target`) and from the `target` itself.
def _compute_loss(self, model_output: torch.Tensor, target: torch.Tensor) -> torch.Tensor: pass
[ "def _compute_lm_loss(self, output, target):\n # rescale with tau (temperature) and apply the log_softmax.\n scores = self.generator(self._bottle(output)) / self.lm_prior_tau\n scores = F.log_softmax(scores.to(torch.float32), dim=-1)\n\n src = target.detach().clone()\n src[src == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a prediction from the probability distributions defined by the specific likelihood model and the parameters given in `model_output`.
def _sample(self, model_output: torch.Tensor) -> torch.Tensor: pass
[ "def sample_prediction(self):\n\t\tnn_param_set = np.random.choice(self.nn_param_sets, p = self.posterior_weights)\n\t\tself.set_k_weights(nn_param_set)\n\t\treturn self.model.predict(self.x)", "def predict(model, X_test):", "def mock_predict(model_id):\t\n\n\tmodel_path = \"{}/m{}.pkl\".format(model_db_path, m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We have the left and right bounds of the view and need to return all tiles that fall within these bounds: the view's left bound is checked against each tile's right edge, and the view's right bound against each tile's left edge (a sketch of the is_onscreen test follows this row).
def get_onscreen_tiles(self, view_left_bound, view_right_bound): tiles_group = pygame.sprite.Group() for tile in self._tiles: if self.is_onscreen(tile, view_left_bound, view_right_bound): tiles_group.add(tile) return tiles_group
[ "def getTilesInRect(self, rect, screenTileSize):\n tiles = []\n rectY = max(0, rect.y - 1)\n rectYMax = min(self.height, rectY + rect.height + 3)\n rectY = max(0, rectYMax - 3 - screenTileSize.height)\n\n\n rectX = max(0, rect.x - 1)\n rectXMax = min(self.width, rectX + rec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Begin a new instruction; the user can append any combination of transfers and mixes, as long as all volumes belong to the same pipette range.
def instruction_stream_cmdline(self): # get pipette type print("--------------------------------------------") print("All movements in this instruction group will share a pipette tip.\ \nand must be of the same command type and volume range.\ \ne.g. A group of transfers, using t...
[ "def populate_selection_transfer_junction():\n dbpagination=sddbpagination.DBPagination()\n\n transfer_without_selection=0\n transfer_without_dataset=0\n i=0\n transfers=dbpagination.get_files() # loop over block (trick not to load 300000 CTransfer objects in memory..). Size is given by pagination_bl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert a pipette instruction at the given index: [1,2,3,4,5] with index = 2 becomes [1,2,new,3,4,5].
def insert_pipette_instruction(self,index): removed_instructions = self.rewind(index) self.instruction_stream_cmdline() self.fast_forward(removed_instructions)
[ "def insert(self, i, x):", "def insert_cycler_instruction(self,index):\n removed_instructions = self.rewind(indeX)\n self.cycler_instruction()\n self.fast_forward(removed_instructions)", "def insert_before(self, func, index):\n self.procedure.insert(index, func)", "def dangerous_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert a cycler instruction at the given index: [1,2,3,4,5] with index = 2 becomes [1,2,new,3,4,5].
def insert_cycler_instruction(self,index):        removed_instructions = self.rewind(index)        self.cycler_instruction()        self.fast_forward(removed_instructions)
[ "def insert_pipette_instruction(self,index):\n removed_instructions = self.rewind(index)\n self.instruction_stream_cmdline()\n self.fast_forward(removed_instructions)", "def insert(self, i, x):", "def dangerous_insert(x, i, my_list):\r\n return", "def insertary(self, chtr, index):\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rewind protocol state to an index in self.instructions: given [1,2,3,4,5] and index = 2, self.instructions becomes [1,2] and the removed instructions [3,4,5] are returned (see the list-level sketch after this row).
def rewind(self,index): removed_instructions = [] for instruction in reversed(self.protocol.instructions[index:]): for group in reversed(instruction["groups"]): for transfer in group.get("transfer",{}): fromLoc = transfer["from"]["locName"] ...
[ "def d2_restore_state(program, pos1=12, pos2=2):\n program[1] = pos1\n program[2] = pos2", "def backstep(self):\n head, moves = self.history.pop()\n for i in range(self.N):\n if moves[i]:\n Tape._pop(self.stacks[moves[i] < 0][i])\n Tape._append(self.sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Step back through the list of removed instructions and add them back to the protocol.
def fast_forward(self,removed_instructions): for instruction in removed_instructions: for group in instruction["groups"]: if group.get("transfer"): fromLocs = [] toLocs = [] volumes = [] changeSettings ...
[ "def rewind(self,index):\n removed_instructions = []\n for instruction in reversed(self.protocol.instructions[index:]):\n for group in reversed(instruction[\"groups\"]):\n for transfer in group.get(\"transfer\",{}):\n fromLoc = transfer[\"from\"][\"locName\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the header part of a packet. Returns a dict.
def _parse_header(head): # CALL1>CALL2,CALL3,CALL4,CALL5: # |from-|--to-|------path-------| # try: (fromcall, path) = head.split('>', 1) except: raise ParseError("invalid packet header") # looking at aprs.fi, the rules for from/src callsign # are a lot looser, causing a lot...
[ "def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Looks for base91 telemetry in the comment field. Returns [remaining_text, telemetry].
def _parse_comment_telemetry(text): parsed = {} match = re.findall(r"^(.*?)\|([!-{]{4,14})\|(.*)$", text) if match and len(match[0][1]) % 2 == 0: text, telemetry, post = match[0] text += post temp = [0] * 7 for i in range(7): temp[i] = base91.to_decimal(telemetr...
[ "def _extract_from_oom_text(self):\n\n for rec in [self.REC_INVOKED_OOMKILLER,\n self.REC_KILLED,\n self.REC_MEMINFO_1,\n self.REC_MEMINFO_2,\n self.REC_PAGECACHE,\n self.REC_PAGEINFO,\n self.REC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the valence active space. Returns the CAS occupation and the orbital indices.
def make_initial_active_space(self) -> Tuple[List[int], List[int]]: self.cas.make_valence_cas(self.molecule) return self.cas.occupation, self.cas.orbital_indices
[ "def _build_action_space(self):\n motor_mode = self._gym_config.simulation_parameters.motor_control_mode\n if motor_mode == robot_config.MotorControlMode.HYBRID:\n action_upper_bound = []\n action_lower_bound = []\n action_config = self._robot_class.ACTION_CONFIG\n for action in action_con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort orbital indices with respect to their threshold value.
def _sort_orbitals_by_s1( self, thresholds_list: np.ndarray, orbitals_index: np.ndarray ) -> Tuple[np.ndarray, np.ndarray]: # sort arrays decreasing sortkey = np.argsort(-thresholds_list) thresholds_list = thresholds_list[sortkey] orbitals_index = orbitals_index[sortkey] ...
[ "def _sort(self):\n self.objects = self.objects[np.lexsort((self.dx, self.dy))[::-1]]", "def determine_long_short_pf1D_indicies_sort_by_peak(curr_active_pipeline, curr_any_context_neurons, debug_print=False):\n def _subfn_sort_desired(extant_arr, desired_sort_arr):\n \"\"\" \n Want to find...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for a plateau of s1 values. Returns the plateau vector and the orbital indices.
def _find_plateaus(self) -> Tuple[List[int], np.ndarray]: max_s1 = max(self.diagnostics.s1_entropy) print(f"Maximal single orbital entropy: {max_s1}") if self._excited_states_orbital_indices: orbitals_index = np.array(self._excited_states_orbital_indices) else: or...
[ "def search_plate(self):\n display(self.plate)", "def search_match_1(snip_fgp1):\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"SELECT song_id FROM songs\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make an active space by searching for plateaus in s1 and reordering orbitals. Returns True if a plateau, i.e. a smaller active space, was found; False if no plateau was found.
def _make_active_space(self) -> bool: plateau_vector, orbitals_index = self._find_plateaus() # found plateau if len(plateau_vector) != 0: self.cas.get_from_plateau( plateau_vector, list(orbitals_index.tolist()) ) print(f"Found plateau, includi...
[ "def try_phase():\n global init_simp, smp_trace,aigs\n n = n_phases()\n print 'Phases = %d'%n\n## if ((n == 1) or (n_ands() > 45000) or init_simp == 0):\n if ((n == 1) or (n_ands() > 60000)):\n return False\n## init_simp = 0\n res = a_trim()\n## print hist\n print 'Trying phase abs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of active spaces for the large active space protocol in autoCAS. Returns the list of active spaces.
def get_large_active_spaces(self): if self.large_spaces.max_orbitals > len(self.cas.orbital_indices): print( f"""Large CAS protocol is not required here, but it will be done anyways with \nmax orbitals = number of orbitals/2 = {len(self.cas.orbital_indices)/2}""" ...
[ "def spaces(self):\n if not self._spaces or not self._client.cache:\n self._spaces = [\n Space(x, client=self._client, team=self)\n for x in self._client.get(f\"team/{self.id}/space\")[\"spaces\"]\n ]\n return self._spaces", "def listSpaces(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends day announcements stored in special_days.txt in the plugin config folder
def do_often(self): cur_date = date.today() if cur_date != self.last_announced_date: filepath = "plugins/config/special_days.txt" file = open(filepath,"r") self.days = file.readlines() file.close() cur_day_name = cur_date.strftime("%a")....
[ "def everyday(self):\n\n # Telephony data\n telephony.Calls.copyfiles(self.date_report)\n self.tp.get_data(self.date_report)\n tp_data = self.tp.report_data(self.date_report, self.date_report)\n\n self.bar.update(5)\n\n # Calltouch data\n self.ct.get_data(self.date_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Always load star rating within meetings since poll forms can be opened anywhere now.
def need_star_rating(view, event): if view.request.meeting: star_rating.need()
[ "def _set_rating(self):\r\n try:\r\n self.rating = self.page.find('span', {'itemprop': 'ratingValue'})\r\n\r\n if self.rating:\r\n self.rating = self.rating.contents[0].strip()\r\n\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve rati...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a TaskDistribution that, on each reset, samples a different set of omniglot characters.
def create_omniglot_allcharacters_task_distribution(path_to_pkl, num_training_samples_per_class=10, num_test_samples_per_class=-1, num_training_classes=20, ...
[ "def init_random(self, N: int):\r\n self.taskNames = [f\"Task {i}\" for i in range(1, N+1)]\r\n self.workerNames = [f\"Worker {i}\" for i in range(1, N+1)]\r\n self.N = N\r\n self.G = np.matrix(np.random.randint(1,N,(N,N)),dtype=float)\r\n self.G0 = np.copy(self.G)", "def gen(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a ResourceID into the correct format.
def scrub_resource_id(rsrc_id): rsrc_id = wipe_resource_id(rsrc_id) rsrc_id = "-".join( [rsrc_id[:8], rsrc_id[8:12], rsrc_id[12:16], rsrc_id[16:20], rsrc_id[20:32]] ) assert len(rsrc_id) == 36, "{} is not a valid Resource-ID".format(rsrc_id) return rsrc_id
[ "def _get_id(self, resource):\n if hasattr(resource, 'id'):\n resource_id = \\\n resource.id() if callable(resource.id) else resource.id\n elif hasattr(resource, 'get_id'):\n resource_id = resource.get_id()\n elif 'id' in resource:\n resource_id =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates a list of 2-tuples for looping through `n` results in steps of size `step_size` (a worked example follows this row).
def calc_loop_steps(n, step_size, offset=0): steps = (n - offset) // step_size remainder = (n - offset) % step_size moves = [ (offset + (step_size * s), offset + (step_size * (s + 1))) for s in range(steps) ] if remainder > 0: moves.append( (offset + (step_size * steps), ...
[ "def figurate_numbers(size):\n assert size >= 3\n step = size - 2\n n = 1\n d = n + step\n while True:\n yield n\n n += d\n d += step", "def do_chunkify(lst,n):\n return [lst[i::n] for i in range(n)]", "def build_list_with_step(length, step):\n lst = []\n i = 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if timestamp lies within the range (time1, time2).
def filtertime(timestamp, interval): T0, T1 = interval if (timestamp <= T1) and (timestamp >= T0): return True else: return False
[ "def in_bounds(t0, t1):\n assert t0 <= t1\n ends_before_bounds = t1 < start_time\n starts_after_bounds = t0 >= end_time\n return not (ends_before_bounds or starts_after_bounds)", "def is_between(element, h1, h2):\n\n g = time.gmtime(float(element['ts']))\n hour = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables multithreaded API requests for fast downloads of large datasets.
def enable_multithreading(self): print("Multi-Threaded API requests enabled.") self.multi_thread = True
[ "def multithread(files_to_download: list) -> None:\n download_threads = [] # List of all the Thread objects\n\n # creates multiple threads and start downloading\n for download_chunk in files_to_download: # Iterate the split list chunk...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of organization types available on `data.gov.in`
def list_org_types(self): return self.org_types
[ "def test_get_org_types(self):\n org_type_list = self.service.get_org_types()\n self.assertTrue(len(org_type_list))\n self.assertTrue(type(org_type_list[0]), OrganizationType)", "def license_types(self):\r\n\r\n if self._gis.version < [6,4]:\r\n return []\r\n\r\n url ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of organization names available on `data.gov.in`
def list_org_names(self): return self.org_names
[ "def gov_orgs():\n us_gov_github_orgs = set()\n\n gov_orgs_json = requests.get(\n \"https://government.github.com/organizations.json\",\n timeout=DEFAULT_REQUESTS_TIMEOUTS,\n ).json()\n\n us_gov_github_orgs.update(gov_orgs_json[\"governments\"][\"U.S. Federal\"])\n us_gov_github_orgs.up...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of sectors listed on `data.gov.in`
def list_sectors(self): return self.sectors
[ "def get_sector_stock_list():\n daily_records = import_daily_record(os.path.dirname(__file__) + '/data/daily_record.csv')\n stock_category = import_stock_categories()\n stock_list = daily_records.ticker.unique()\n sector_list = stock_category.sector.unique()\n return stock_list.tolist(), sector_list....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of resources updated in the last N days.
def list_recently_updated(self, days=7, max_results=10, print_results=True): TimeNow = int(time.time()) TimePast = TimeNow - int(86400 * days) TimeInterval = (TimePast, TimeNow) filtered_json = list( filter( lambda x: filtertime(list(x.values())[0], TimeInter...
[ "def get_updated_listings(session, date=None):\n if date:\n # Assume local time is being specified.\n last_updated = datetime_to_rets_time(date)\n else:\n last_updated = datetime_to_rets_time(get_updated_datetime())\n touch('updated')\n listing_search = create_listing_search(session...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of resources created in the last N days.
def list_recently_created(self, days=7, max_results=10, print_results=True): TimeNow = int(time.time()) TimePast = TimeNow - int(86400 * days) TimeInterval = (TimePast, TimeNow) filtered_json = list( filter( lambda x: filtertime(list(x.values())[0], TimeInter...
[ "def recent_polls(n=5, **kwargs):\n\tname = kwargs.get('name','No argument was passed.')\n\tquestions = Question.objects.all().order_by('-created_at')\n\treturn list(questions)[0:n]", "def _get_recent_files(self):\n num_days = 7\n file_list = []\n for i in range(num_days):\n x = da...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for a `data.gov.in` dataset resource using an organization type.
def search_by_org_type(self, query, max_results=10, print_results=True): if query in self.org_types: result_indices = np.ravel( [ list(item.values()) for item in self.assets.orgtype_idx_map if list(item.keys())[0] == query ...
[ "def search_records(self, domain, record_type, name=None, data=None):\r\n return domain.search_records(record_type=record_type,\r\n name=name, data=data)", "def test_view_can_search_for_org_by_name(self):\n\n org0 = {\n \"name\": \"BHive\",\n \"description\": \"A...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if we have a list-like indexer that is not a NamedTuple.
def is_list_like_indexer(key) -> bool: # allow a list_like, but exclude NamedTuples which can be indexers return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
[ "def is_typed_named_tuple(cls):\n return (\n issubclass(cls, tuple)\n and hasattr(cls, \"_fields\")\n and hasattr(cls, \"_field_types\")\n )", "def _is_named_tuple(x: Any) -> bool:\n t = type(x)\n b = t.__bases__\n if len(b) != 1 or b[0] != tuple:\n return False\n f =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate that value and indexer are the same length. A special case is allowed when the indexer is a boolean array and the number of True values equals the length of ``value``. In this case, no exception is raised (see the numpy illustration after this row).
def check_setitem_lengths(indexer, value, values) -> None: # boolean with truth values == len of the value is ok too if isinstance(indexer, (np.ndarray, list)): if is_list_like(value) and len(indexer) != len(value): if not ( isinstance(indexer, np.ndarray) and...
[ "def _check_scatter_key_length(\n self, num_keys: int, value: Union[cudf.core.scalar.Scalar, ColumnBase]\n ):\n if isinstance(value, ColumnBase):\n if len(value) != num_keys:\n msg = (\n f\"Size mismatch: cannot set value \"\n f\"of si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open CPTAC embargo details in web browser.
def embargo(): message = "Opening embargo details in web browser..." print(message, end = '\r') webbrowser.open("https://proteomics.cancer.gov/data-portal/about/data-use-agreement") print(" " * len(message), end='\r') # Erase the message
[ "def open(ctx, bibcodes, pdf):\n if ctx.obj[\"debug\"]:\n logger.setLevel(logging.DEBUG)\n # TODO: This is breaking up string if one item given from stdin.\n bibcodes = list(map(find_bibcode, bibcodes))\n logger.debug(f\"bibcodes: {bibcodes}\")\n if len(bibcodes) == 0:\n raise click.Usa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Give instructions for citing CPTAC datasets.
def how_to_cite(): print("If you use the API to generate results, please cite our manuscript describing the API - Lindgren et al. 2021, PMID:33560848, https://pubs.acs.org/doi/10.1021/acs.jproteome.0c00919") print('\n') print("For instructions on how to cite a specific dataset, please call its how_to_cite m...
[ "def citing_me():", "def citation(**kwargs):\n print_citation()", "def citation():\n\n cite = (\"To cite OSMnx, use:\\n\\n\"\n \"Boeing, G. 2017. OSMnx: New Methods for Acquiring, Constructing, Analyzing, \"\n \"and Visualizing Complex Street Networks. Computers, Environment and Urba...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
moves one rectangle [rect] going at [velocity]
def move_rect(rect, velocity): if rect.right > SCREEN_W or rect.left < 0: velocity[0] = -velocity[0] if rect.bottom > SCREEN_H or rect.top < 0: velocity[1] = -velocity[1] rect.move_ip(velocity[0], velocity[1])
[ "def update(self):\n self.rect.x += self.x_velocity\n self.rect.y += self.y_velocity", "def move(self):\n\n self.rect.x += self.x_speed\n self.rect.y += self.y_speed\n\n # makes he ball bounce off the wall\n if self.rect.left <= 0 or self.rect.right >= self.windowWidth:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
moves many rectangles. For each element in [rect_objs], we gather the rectangle and its velocity.
def move_rects(rect_objs): for shape in rect_objs: rect = shape['rect'] velocity = shape['velocity'] move_rect(rect, velocity)
[ "def move_objects(self):\n self.player.update()\n for obj in self.game_objects:\n obj.update()\n self.check_collisions()", "def draw_rects(rect_objs, window):\n for shape in rect_objs:\n if shape['visible']:\n rect = shape['rect']\n color = shape['co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
draws one rectangle [rect] with [color] on [window]
def draw_rect(rect, color, window): pygame.draw.rect(window, color, rect)
[ "def drawRectangle(self, canvas):", "def draw_rect(surface, fill_color, outline_color, rect, border=1):\n\tsurface.fill(outline_color, rect)\n\tsurface.fill(fill_color, rect.inflate(-border*2, -border*2))", "def draw(self):\n #There is only one turtle, so it needs updated to display each object's stored ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
draws many rectangles in [rect_objs] with its associated color if it is visible on [window]
def draw_rects(rect_objs, window): for shape in rect_objs: if shape['visible']: rect = shape['rect'] color = shape['color'] draw_rect(rect, color, window)
[ "def draw(self) -> None:\n\n self.window.fill(self.COLOUR_PALETTE[\"Black\"])\n\n for obj in self.objects:\n obj.draw(self.window, self.COLOUR_PALETTE)", "def draw_rects(frame: ndarray, rects: list, color: (int, int, int), thickness: int) -> None:\n for (pos_x, pos_y, width, height...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of generated rectangles. n is the number of rectangles to generate.
def gen_rects(n): output = [] for i in range(n): rect_w = random.randint(20, 75) rect_h = random.randint(20, 75) # use rect dimensions so they do not spawn over the screen x = random.randint(rect_w, SCREEN_W - rect_w) y = random.randint(rect_h, SCREEN_H - rect_h) ...
[ "def create_rectangles(self, rectangle: List[Tuple[int, int, Optional[str]]]) -> List[Rectangle]:\n return [\n Rectangle(str(index), rectangle[0], rectangle[1], rectangle[2] if len(rectangle) > 2 else None)\n for index, rectangle in enumerate(rectangle)\n ]", "def rect_list(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an array of bits to bytes.
def bitarray_to_bytes(bitarray: List[int]) -> bytes: n_bits = len(bitarray) n_bytes = (n_bits + 7) >> 3 int_array = [0] * n_bytes for i in range(n_bits): int_array[i >> 3] |= bitarray[i] << (i & 7) return bytes(int_array)
[ "def _bit_list_to_bytes(bit_list):\n num_bits = len(bit_list)\n byte_vals = bytearray()\n for start in six.moves.xrange(0, num_bits, 8):\n curr_bits = bit_list[start:start + 8]\n char_val = sum(val * digit\n for val, digit in zip(_POW2, curr_bits))\n byte_vals.app...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert input bytes into an array of bits.
def bytes_to_bitarray(the_bytes: bytes, num_bits: int) -> List[int]: return [(the_bytes[i >> 3] >> (i & 7)) & 1 for i in range(num_bits)]
[ "def byte_to_bit_array(byte: int) -> [int]:\n return [int(i) for i in \"{0:08b}\".format(byte)]", "def __to_bits(data_byte):\n return [(int(data_byte) >> i) & 1 for i in range(0, 8)]", "def convert_bytes_to_bit_field(input_bytes):\n byte_list = list(input_bytes)\n byte_list.reverse()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a list of WSR bits.
def generate_wsr(num_bits: int) -> List[int]: return list(np.random.randint(2, size=num_bits))
[ "def genPWlist():\n pwList = []\n j = 1\n while j <= 10:\n nextPW = [genPW()]\n pwList = pwList + nextPW\n j += 1\n return pwList", "def gen_all_n_length_bitsrings(n):\n for i in range(1 << n):\n yield '{:0{}b}'.format(i, n)", "def ServosAsBits(servo_list):\n return s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a corpus of tweets.
def load(self, input_file): self.tweets = Tweets(input_file)
[ "def test_load_corpus(self):\r\n corpus_data = corpus.load_corpus('chatterbot.corpus')\r\n\r\n self.assertTrue(len(corpus_data))", "def load_corpus(self, dir):\n word_fn = codecs.open(dir + \"word.dic\", \"r\", \"utf-8\")\n for line in word_fn:\n word_nr, word = line.strip()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given options, this function collects tweets using the Streaming API and stores them in memory or on disk.
def collect(self, tracked_words, output_file=None, mode='a+', count=0, lang=["en-EN", "en", "en-CA", "en-GB"], locations=None): self.tweets = Tweets(output_file, mode) i = 0 while True: try: r = self.getStatusStream(tracked_words, lang, locations) ...
[ "def put_tweets(self):\n\t\tscreen_name = self.screen_name\n\t\tself.get_user_retweets()\n\t\tself.retweet_df[\"date\"] = pd.to_datetime(self.retweet_df['created_at']).dt.date\n\t\tself.retweet_df = self.retweet_df[self.retweet_df[\"date\"] >= self.__START_DATE]\n\t\tself.retweet_df = self.retweet_df.drop(\"date\",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter tweets from the loaded corpus by getting, for each term T in the list 'words', n tweets that contain T.
def filter(self, n, words, each_word=True, output_file=None, mode='a+', rt=True): self.filtered_tweets = Tweets(output_file, mode) self.words_filtered = set(words) # initialize count variables count = 0 # Process each tweet for tw in self.tweets: ...
[ "def getTweets(n, contains):\n tweets = []\n i = 0\n for tweet in tweepy.Cursor(api.search,\n q=contains + \"-filter:retweets\",\n rpp=100,\n result_type=\"mixed\",\n include_entities=True,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows the user to easily label a corpus of tweets by giving the labels.
def label(self, labels, output_file=None, begin=0): # define the opening mode of the output file given the begin line o_mode = "w" if begin != 0: o_mode = "a+" self.labeled_tweets = Tweets(output_file, o_mode) count = 0 for tw in self.tweets: tw_la...
[ "def predict_label(texts, labels, text_new):\r\n # YOUR CODE HERE\r\n\r\n # texts = ['RT @GOPLeader', 'RT @GOPLeader', 'Colorless green ideas sleep furiously.']\r\n # labels = ['rep', 'rep', 'dem']\r\n\r\n train_twitter = texts\r\n test_twitter = text_new\r\n\r\n from sklearn.feature_extraction.te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Rancher environment ID. If using an account key, return the environment ID specified by `name`.
def environment_id(self, name: str=None) -> str: if not name: r = self.s.get('{}/projects'.format(self.endpoint_url), params={'limit': 1000}) else: r = self.s.get('{}/projects'.format(self.endpoint_url), params={'limit': 1000, 'name': name}) r.raise_for_status() d...
[ "def get_environment(id=None, name=None):\n data = get_environment_raw(id, name)\n if data:\n return utils.format_json(data)", "def get_environment(self, environment_id):\n\n return self.murano_client.environments.get(environment_id)", "def get_env_variable_by_name(name):\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get Rancher service info for a given environment ID and service name.
def service_info(self, environment_id: str, stack_name: str, service_name: str) -> Dict: if not environment_id: raise Exception('Empty rancher environment ID') r = self.s.get('{}/projects/{}/stacks'.format(self.endpoint_url, environment_id), params={'limit': 1000, 'nam...
[ "def get_service(self, id):\n return self._request('get', path='/services/{}'.format(id), value_only=True)", "def get_service(self):\n\n print \"\\nLooking up services for pod\\n\"\n\n api_url = \"/api/v1/services\"\n if (str(self.args.namespace) != \"None\") & \\\n (str(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finish a service upgrade when the service is in the `upgraded` state.
def service_finish_upgrade(self, environment_id: str, service_id: str) -> Dict: r = self.s.get('{}/projects/{}/services/{}'.format(self.endpoint_url, environment_id, service_id)) r.raise_for_status() data = r.json() if data.get('type') == 'error': raise Exception(json.dumps(d...
[ "def stop_upgrade(self, upgrade_task, service):\n pass", "def pause_upgrade(self, upgrade_task, service):\n pass", "def post_service_upgrade_hook(self, upgrade_task, service):\n return tasks.noop.si()", "def check_service_upgrade():\n # Upgrades to be performed before starting the serv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read from file a dictionary mapping ICD9 codes to their descriptions.
def get_icd9_descript_dict(path): lines = _read_file(path) icd9_descript_dict = {} for l in lines[1:]: # ignore first line which is column names elems = l.split('\t') try: assert len(elems) == 8 # number of columns should be 8 except: print('Problem with f...
[ "def read_description(filelike_or_filepath):\n if isinstance(filelike_or_filepath, str):\n filelike = open(filelike_or_filepath)\n else:\n filelike = filelike_or_filepath\n\n with filelike:\n mapping = parse_description(filelike.read())\n\n return mapping", "def read_dic(filepath)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans up and annotates data; converts data into standard (x, y) form.
def _clean_data(data, icd9_descript_dict, no_onset_age=True): x_raw, y_raw = [], [] for idx, line in enumerate(data): line = line.split() try: features = [] features.append('age_' + line[RAW_AGE_COL]) features.append('gender_' + line[RAW_GENDER_COL]) ...
[ "def data_to_axes(self, x, y):\n # pylint: disable=E0633 # unpacking-non-sequence\n x_geo, y_geo = self.data_to_geo(x, y)\n x, y = self.geo_to_axes(x_geo, y_geo)\n return (x, y)", "def axes_to_data(self, x, y):\n # pylint: disable=E0633 # unpacking-non-sequence\n x_geo,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode features and classes to integer indices using given dictionaries.
def _encode(x_raw, y_raw, feat_idx_dict, class_idx_dict): x_unvec = [[feat_idx_dict[feat] for feat in line] for line in x_raw] y = [class_idx_dict[c] for c in y_raw] assert len(x_unvec) == len(y) return x_unvec, y
[ "def feature_int_map(all_features):\n feature_to_int = {}\n int_to_feature = []\n i = 0\n for feature in all_features:\n feature_to_int[feature] = i\n i += 1\n int_to_feature.append(feature)\n return int_to_feature, feature_to_int", "def make_indices(vocabulary): # TODO: write ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the cost function for the given model parameters.
def compute_cost(self, model_param): loss, _ = self.parameters.cost_model.cost(model_param) return loss.item()
[ "def cost(params):\n\n # get the F(x) response\n Fx = model(params)\n\n # compute goodness of fit\n return scale * (Fx - G)**2", "def _get_cost_function(self):\n self.weights = tf.placeholder(\n tf.float32, shape=[self.n_words, self.n_words])\n self.log_coincidence = tf.placeh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the pointgroup for this spacegroup, e.g. P422 for P43212.
def get_pointgroup(name): space_group = sgtbx.space_group_info(name).group() point_group = ( space_group.build_derived_patterson_group().build_derived_acentric_group() ) return point_group.type().lookup_symbol().replace(" ", "")
[ "def get_process_group(self, name: str): \r\n process_group = nipyapi.canvas.get_process_group(name)\r\n return process_group", "def placement_group_id(self) -> Optional[str]:\n return pulumi.get(self, \"placement_group_id\")", "def get_pipeline_group(self, group_name):\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number corresponding to this spacegroup.
def spacegroup_name_to_number(spacegroup): # check have not had number passed in try: number = int(spacegroup) return number except ValueError: pass return sgtbx.space_group_info(str(spacegroup)).type().number()
[ "def get_group_idx(self) -> int:\n return self.group_idx", "def _get_space_group(s: Structure) -> int:\n try:\n return int(np.round(s.get_space_group_info()[1]))\n except TypeError:\n # 0 should be fine as it is not taken\n return 0", "def groupNum(self, x, y) :\n\t\treturn int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of symmetry operations that spacegroup number has.
def get_num_symops(spacegroup_number): return len(sgtbx.space_group_info(number=spacegroup_number).group())
[ "def symmetry_number(point_group):\n point_group = point_group.strip().lower()\n\n if point_group in {\"c1\", \"ci\", \"cs\", \"c∞v\", \"k\", \"r3\"}:\n symmetry_number = 1\n elif point_group in {\"c2\", \"c2v\", \"c2h\", \"d∞h\", \"s4\"}:\n symmetry_number = 2\n elif point_group in {\"c4\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test add_payload method in modify_jwt.py
def test_add_payload(): with pytest.raises(InvalidJwtJson): add_payload({}, {}) with pytest.raises(InvalidParam): add_payload(jwt_json, "") new_jwt_json = add_payload(jwt_json, add_payload_value) assert list(new_jwt_json[PAYLOAD].keys()) == ["login", "username"] assert new_jwt_json...
[ "def test_change_payload():\n with pytest.raises(InvalidJwtJson):\n change_payload({}, {})\n\n new_jwt_json = change_payload(jwt_json, add_payload_value)\n\n assert new_jwt_json[PAYLOAD] == jwt_json[PAYLOAD]\n\n assert new_jwt_json[HEADER] == jwt_json[HEADER]\n assert new_jwt_json[SIGNATURE] =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test add_header method in modify_jwt.py
def test_add_header():    with pytest.raises(InvalidJwtJson):        add_header({}, {})    with pytest.raises(InvalidParam):        add_header(jwt_json, "{}")    new_jwt_json = add_header(jwt_json, add_header_value)    assert list(new_jwt_json[HEADER].keys()) == ["typ", "alg", "kid"]    assert new_jwt_json[HEADE...
[ "def update_headers(self, new_header):\n headers = f\"Test-Name: {new_header}\"\n self.webhook.additional_headers = headers\n self.webhook.validated_save()", "def mocked_get_threescale_account_secret_header():\n return \"secret\"", "def test_add_payload():\n with pytest.raises(Invalid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test changeAlg method in modify_jwt.py
def test_change_alg():    with pytest.raises(InvalidJwtJson):        change_alg({}, "test")    new_jwt_json = change_alg(jwt_json, "test")    assert new_jwt_json[HEADER]["alg"] == "test"    assert new_jwt_json[PAYLOAD] == jwt_json[PAYLOAD]    assert new_jwt_json[SIGNATURE] == jwt_json[SIGNATURE]
[ "def test_update_secret(self):\n pass", "def test_signature():\n with pytest.raises(InvalidJwtJson):\n signature({}, \"\")\n\n jwt_json = {\n HEADER: {\"typ\": \"JWT\", \"alg\": \"none\"},\n PAYLOAD: {\"login\": \"az\"},\n SIGNATURE: \"\",\n }\n new_jwt = signature(j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test changePayload method in modify_jwt.py
def test_change_payload(): with pytest.raises(InvalidJwtJson): change_payload({}, {}) new_jwt_json = change_payload(jwt_json, add_payload_value) assert new_jwt_json[PAYLOAD] == jwt_json[PAYLOAD] assert new_jwt_json[HEADER] == jwt_json[HEADER] assert new_jwt_json[SIGNATURE] == jwt_json[SIG...
[ "def test_add_payload():\n with pytest.raises(InvalidJwtJson):\n add_payload({}, {})\n\n with pytest.raises(InvalidParam):\n add_payload(jwt_json, \"\")\n\n new_jwt_json = add_payload(jwt_json, add_payload_value)\n assert list(new_jwt_json[PAYLOAD].keys()) == [\"login\", \"username\"]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test signature method in modify_jwt.py
def test_signature(): with pytest.raises(InvalidJwtJson): signature({}, "") jwt_json = { HEADER: {"typ": "JWT", "alg": "none"}, PAYLOAD: {"login": "az"}, SIGNATURE: "", } new_jwt = signature(jwt_json, "") assert new_jwt == jwt jwt_json_test = jwt_to_json(jwt_rsa...
[ "def test_change_payload():\n with pytest.raises(InvalidJwtJson):\n change_payload({}, {})\n\n new_jwt_json = change_payload(jwt_json, add_payload_value)\n\n assert new_jwt_json[PAYLOAD] == jwt_json[PAYLOAD]\n\n assert new_jwt_json[HEADER] == jwt_json[HEADER]\n assert new_jwt_json[SIGNATURE] =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open specified layers in model for training while keeping other layers frozen.
def open_specified_layers(model, open_layers): if isinstance(model, nn.DataParallel): model = model.module # for layer in open_layers: # assert hasattr(model, layer), "'{}' is not an attribute of the model, please provide the correct name".format(layer) for name, module in model.named_chil...
[ "def freeze(model):\n for layer in model.layers:\n layer.trainable = False\n return model", "def freeze_all_but_top(self):\r\n # first: train only the top layers (which were randomly initialized)\r\n # i.e. freeze all convolutional InceptionV3 layers\r\n for layer in self.model.layers:\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads config from the given paths, with later paths taking precedence over earlier ones. A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT section, and be available for use in substitutions. The caller may override some of these seed values.
def load( cls, config_paths: List[str], *, seed_values: Optional[SeedValues] = None, ) -> Union["_EmptyConfig", "_ChainedConfig"]: @contextmanager def opener(f): with open(f, "rb") as fh: yield fh return cls._meta_load(opener, config_paths, seed_values=s...
[ "def load(\n cls, config_paths: List[str], *, seed_values: Optional[SeedValues] = None,\n ) -> Union[\"_EmptyConfig\", \"_ChainedConfig\"]:\n\n @contextmanager\n def opener(f):\n with open(f, 'rb') as fh:\n yield fh\n\n return cls._meta_load(opener, config_paths, seed_values=seed_values)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as the given type. If the specified section does not exist or is missing a definition for the option, the value is looked up in the DEFAULT section. If there is still no definition found, the default value supplied is returned.
def get(self, section, option, type_=str, default=None): return self._getinstance(section, option, type_, default)
[ "def get_value(self, section: str, option: str) -> Optional[str]:", "def get(self,\n section,\n option):\n return self.__parser.get(section=section, option=option)", "def get(self, section, option):\r\n return eval(ConfigParser.RawConfigParser.get(self, section, option))", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the underlying single-file configs represented by this object.
def configs(self) -> Sequence["_SingleFileConfig"]:
[ "def get_config_file(self):\n return self.__config", "def _configs(self):\n return self.dm.configs", "def get_config_file_contents(self):\n return self._contents", "def config_sftp(self):\r\n return self._config_sftp", "def settings_files(self) -> Sequence:\n return self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the sources of this config as a list of filenames.
def sources(self) -> List[str]:
[ "def _getSourcesList(self):\n\t\tif self._sources is not None:\n\t\t\treturn self._sources\n\n\t\tsources = []\n\t\tfiles = os.listdir(self._app_dir)\n\t\tfor file in files:\n\t\t\tname, ext = os.path.splitext(file)\n\t\t\tif ext == \".py\":\n\t\t\t\tsources.append(file)\n\t\treturn sources", "def sources(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the value of the option in this config as a string, or None if no value is specified.
def get_value(self, section: str, option: str) -> Optional[str]:
[ "def get_value(self) -> str:\n text = self.combo.currentText()\n return self.options[text]", "def get_selenoid_option(self, option_name):\n try:\n option_value = literal_eval(self.driver_wrapper.config.get('Capabilities', 'selenoid:options'))[option_name]\n except Exception:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the path to the source file the given option was defined in.
def get_source_for_option(self, section: str, option: str) -> Optional[str]:
[ "def source_option(option_name):\n return f'source.{option_name}'", "def source_path(self):\n return self._data.get('source_path')", "def construct_file_path(options):\n\n # if path already contains file name return \n if options.with_fn:\n return options.src_path\n \n fn = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All options defined for the section.
def options(self, section: str) -> List[str]:
[ "def keys(self, section):\n return self.parser.options(section)", "def options(self):\n return self.data['options']", "def options(self) -> List[OptionInfo]:\n return []", "def set_general_options(self):\n\n for option, td in self.OPTIONS.items():\n self.set_option(optio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All the DEFAULT values (not interpolated).
def defaults(self) -> Mapping[str, str]:
[ "def get_default_values(self):\n default_values = []\n for joint in self.joints:\n default_values.append(self.joints[joint]['default_value'])\n return default_values", "def set_all_defaults(self):\n for key, param in self.parameters.items():\n valdict = self.param...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if the value is actually an option belonging to that section. A value that looks like an option might actually be a subscope, e.g. the option value `java` belonging to the section `cache` could actually be the section `cache.java`, rather than the option `cachejava`. We must also handle the special syntax of ...
def _is_an_option(option_value: Union[_TomlValue, Dict]) -> bool: if isinstance(option_value, dict): return "add" in option_value or "remove" in option_value return True
[ "def __contains__(self,opt):\n\t\tfor n, d in self.options:\n\t\t\tif n == opt:\n\t\t\t\treturn True\n\t\treturn False", "def completing_subcommand_option_util(self, option, words):\n # Example: Return True for: gh view 1 --pag\n if len(words) > 3:\n if option in words:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the values for a section, if any.
def _find_section_values(self, section: str) -> Optional[Dict]: def recurse(mapping: Dict, *, remaining_sections: List[str]) -> Optional[Dict]: if not remaining_sections: return None current_section = remaining_sections[0] if current_section not in mapping: ...
[ "def get_section(section):", "def search(tokens, section):\n for t in tokens:\n if t[0] == section:\n return t[1:]\n return []", "def _extract_section(section_content):\n lines = section_content.split(\"\\n\")\n\n section_dict = OrderedDict()\n for line in lines:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For any value containing %(foo)s, substitute it with the corresponding value from DEFAULT or the same section (see the configparser illustration after this row).
def _possibly_interpolate_value( self, raw_value: str, *, option: str, section: str, section_values: Dict, ) -> str: def format_str(value: str) -> str: # Because dictionaries use the symbols `{}`, we must proactively escape the symbols so that # .format() does not try to imp...
[ "def get(self, key: str, default: str = \"\") -> str:\n try:\n return self.resolved[key]\n except KeyError:\n tmp = self.config[self.section].get(key, default)\n for subst_key, subst_value in self.substitutions.items():\n tmp = tmp.replace(subst_key, sub...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reduce the selection to a single uppermost empty interval.
def empty(doc, selection, selectmode=None): beg = selection[0][0] return Selection(Interval(beg, beg))
[ "def _drop_empty_intervals(self):\n keep_interval_ids = np.argwhere(self.lengths).squeeze().tolist()\n self._abscissa.support = self._abscissa.support[keep_interval_ids]\n return self", "def emptyafter(doc, interval, selectmode=None):\n _, end = interval\n return Interval(end, end)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the empty interval before each interval.
def emptybefore(doc, interval, selectmode=None): beg, _ = interval return Interval(beg, beg)
[ "def _drop_empty_intervals(self):\n keep_interval_ids = np.argwhere(self.lengths).squeeze().tolist()\n self._abscissa.support = self._abscissa.support[keep_interval_ids]\n return self", "def nondegenerate_intervals(self):\n return self.__nondegenerate", "def emptyafter(doc, interval,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the empty interval after each interval.
def emptyafter(doc, interval, selectmode=None): _, end = interval return Interval(end, end)
[ "def _drop_empty_intervals(self):\n keep_interval_ids = np.argwhere(self.lengths).squeeze().tolist()\n self._abscissa.support = self._abscissa.support[keep_interval_ids]\n return self", "def all_intervals(self):\n\t\t# TODO any more elegant / efficient way of doing this?\n\t\tall_intervals = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move each interval one line down. Preserve fully selected lines.
def movedown(doc, interval, reverse=False): beg, end = interval if end - beg > 0: currentline = selectfullline(doc, Interval(end - 1, end)) else: currentline = selectfullline(doc, Interval(end, end)) if not reverse: nextline = selectnextfullline(doc, currentline) else: ...
[ "def trimJoin_Coro(self):\n offsetLines = []\n moveEnd = yield\n moveStart = yield\n while not(moveStart is None):\n _, point = moveEnd.segmentsIntersect(moveStart, c.ALLOW_PROJECTION)\n moveEnd = l.Line(moveEnd.start, point, moveEnd)\n moveStart = l.Line...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Since TCP is a stream-oriented protocol, responses aren't guaranteed to be complete when they arrive. The buffer stores all the data, and this function splits the data into replies based on the newline delimiter (a minimal reassembly sketch follows this row).
def parse_buf(self, encoding="unicode"): buf_len = len(self.buf) replies = [] reply = b"" chop = 0 skip = 0 i = 0 buf_len = len(self.buf) for i in range(0, buf_len): ch = self.buf[i:i + 1] if skip: skip -= 1 ...
[ "def _recv_line(self):\n msg_line = ''\n # Retrieve an complete line end with CRLF.\n while 1:\n line = self.buffer.readline()\n msg_line += line\n if line[-2:] == CRLF: break\n printd(msg_line)\n # Remove the ending CRLF.\n return msg_line[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the function which handles retrieving new data chunks. Its main logic is avoiding a recv call blocking forever and halting the program flow. To do this, it manages errors and keeps an eye on the buffer to avoid overflows and DoS attacks.
def get_chunks(self, fixed_limit=None, encoding="unicode"): # Socket is disconnected. if not self.connected: return # Recv chunks until network buffer is empty. repeat = 1 wait = 0.2 chunk_no = 0 max_buf = self.max_buf max_chunks = self.max_c...
[ "def handle_read(self):\n\t\tself.data_buffer.append(self.recv(8192))", "def worker():\r\n unprocessed=bytes()\r\n while True:\r\n try:\r\n chunk = self.socket.recv(2048)\r\n if len(chunk)==0: \r\n break\r\n els...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a week model instance for the current week
def current_week(self): try: week = Week.objects.get( week_start__lte=datetime.date.today(), week_end__gte=datetime.date.today() ) except Week.DoesNotExist: week = Week.objects.get(week_number=1) return week
[ "def current_week(self):\n return floor(abs((datetime.now().date() - datetime.fromtimestamp(self.start_date).date()).days / 7))", "def current_week(self):\n return floor(abs((datetime.now().date() - self.start_date).days / 7))", "def week():", "def next(self):\n return Week.for_date(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a queryset of picks for a matchup_id and nfl_team_id
def get_picks_by_matchup_nflteam(self, matchup_id, nfl_team_id): return super(PicksManager, self).get_query_set().filter(matchup_id_id=matchup_id, nfl_team_id_id=nfl_team_id)
[ "def get_matches_for_team(team):\n fixture = Round.all()\n matches = [m for m in itertools.chain(*(r.matches for r in fixture))\n if m.home == team or m.away == team]\n return matches", "def get_team_matchups(self, team_id):\n team_key = self.get_league_key() + \".t.\" + str(team_id)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of wins for a user_id and week_number
def get_wins_count_by_user_week(self, user_id, week_number): return super(PickViewManager, self).get_query_set().filter(user_id=user_id, week_number=week_number, won_pick=1).count()
[ "def wins(self):\n if self.unique_id in winners_database:\n return winners_database[self.unique_id].wins\n return 0", "def weeksWorked(couriersWeeklyData):\r\n\r\n\tn_weeks_worked = couriersWeeklyData[['week']].groupby(couriersWeeklyData.courier).count().reset_index()\r\n\tn_weeks_worked....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the user has no losses for a week, False otherwise.
def has_no_losses(self, user_id, week_number): # get all completed matchups for the user and week matchups_completed = super(PickViewManager, self).get_query_set().filter(user_id=user_id, week_number=week_number, matchup_completed=True) if matchups_completed.exists(): # get all compl...
[ "def _check_week_stop(self, date):\n\n weekstop = False\n ## Friday\n if date.weekday() == 4 and date.hour >= 20:\n weekstop = True\n ## Saturday\n elif date.weekday() == 5:\n weekstop = True\n ## Sunday\n elif date.weekday() == 6:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the margin of coverage for a user_id and week_number
def calc_margin_of_coverage(self, user_id, week_number, no_losses): total_margin_of_coverage = 0 if no_losses: picks = super(PickViewManager, self).get_query_set().filter(user_id=user_id, week_number=week_number) for pick in picks: moc = 0 if pick....
[ "def get_wins_count_by_user_week(self, user_id, week_number):\n return super(PickViewManager, self).get_query_set().filter(user_id=user_id, week_number=week_number, won_pick=1).count()", "def calc_user_coverage(self):\n users_in_recs = set(self.recos['userid'])\n users_test = set(self.data_te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to add a template to the template dictionary and array. Templates to be used as Poissonian models can be added as counts or flux. In either case the map must account for the point spread function before addition.
def add_template(self, template, label, units='counts'): if units == 'flux': assert (len(self.exposure_map) != 0), \ "Must provide exposure map before adding a flux template" assert (len(self.exposure_map) == len(template)), \ "Template must be the same s...
[ "def addTemplate(\n self, sources, class_id, object_mask\n ) -> Tuple[retval, bounding_box]:\n ...", "def AddInputTemplate(self, templ):\n self._inputTemplates.append(templ)", "def init_templates():\n\n templates = []\n\n # single stroke templates (all fingers doing the same if var...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compress data, exposure and templates. Before calling, data, exposure, templates and mask must have been loaded.
def compress_data_and_templates(self): # Check user has loaded data, exposure and templates assert((len(self.count_map) != 0) | (len(self.exposure_map) != 0)), \ "Must load a count and exposure map before setting up the scan" assert(len(self.templates) != 0), \ "Must loa...
[ "def compress(self, pdb, data, **kwargs):\n\n filename = self.stage.filename(pdb, **kwargs)\n self.logger.debug('Compressing %s', filename)\n if kwargs.get('dry_run'):\n return True\n\n temp_output_file = os.path.join(self.config['locations']['fr3d_root'],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Divide the ROI into nexp different regions of similar exposure
def divide_exposure(self): # Determine the pixels of the exposure regions pix_array = np.where(self.mask_total == False)[0] exp_array = np.array([[pix_array[i], self.exposure_map[pix_array[i]]] for i in range(len(pix_array))]) array_sorted = exp_array[np.arg...
[ "def FindAdaptiveROI(image, center_ROI, aspr_ROI,displayImages, debug = True):\n #inputfilename = 'img6.png'\n #outputfilename = 'edge2.png'\n #nucleation_down = 1 # 0 for nucleation up\n #center_ROI = (511,672) #center of the object to be identified\n #aspr_ROI = 2/3 # x_width/y_width for ROI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take an input map and return a masked, compressed version; if expreg is True, also return a set of such maps broken into exposure regions.
def return_masked_compressed(self, map_to_mask, expreg=False): if not expreg: temp_masked_map = ma.masked_array(data=map_to_mask, mask=self.mask_total) return temp_masked_map.compressed() else: temp_masked_map_list = [] ...
[ "def collapse_cr_map(dq_map):\n nints, ngroups, ny, nx = dq_map.shape\n\n # Create an array containing all group indexes\n all_groups = np.zeros((1, ngroups, 1, 1), dtype=np.int)\n all_groups[0, :, 0, 0] = np.arange(ngroups)\n intermediate1 = np.repeat(all_groups, nints, axis=0)\n intermediate2 = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a filename (str) and key (bytes), it encrypts the file and writes it
def encrypt(filename, key): f = Fernet(key) with open(filename, "rb") as file: # read all file data file_data = file.read() # encrypt data encrypted_data = f.encrypt(file_data) # write the encrypted file with open(filename, "wb") as file: file.write(encrypted_data)
[ "def encrypt_file(ctx, filename, key):\n\n click.echo(f\"Encrypting {filename}...\")\n\n # opening the key\n with open(key, 'rb') as file_key:\n key = file_key.read()\n\n # using the generated key\n fernet = Fernet(key)\n\n # opening the original file to encrypt\n with open(filename, 'rb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a filename (str) and key (bytes), it decrypts the file and writes it
def decrypt(filename, key): f = Fernet(key) with open(filename, "rb") as file: # read the encrypted data encrypted_data = file.read() # decrypt data decrypted_data = f.decrypt(encrypted_data) # write the original file with open(filename, "wb") as file: file.write(decrypte...
[ "def decrypt_file(ctx, filename, key):\n\n click.echo(f\"Decrypting {filename}...\")\n\n # using the key\n with open(key, 'rb') as file_key:\n key = file_key.read()\n\n fernet = Fernet(key)\n\n # opening the encrypted file\n with open(filename, 'rb') as enc_file:\n encrypted = enc_fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the communication with the camera. Gets the maximum and minimum width. It also forces the camera to work on Software Trigger.
def initialize(self): logger.debug('Initializing Basler Camera') tl_factory = pylon.TlFactory.GetInstance() devices = tl_factory.EnumerateDevices() if len(devices) == 0: raise CameraNotFound('No camera found') for device in devices: if self.cam_num in dev...
[ "def initialize_sensor(self):\n if self.camera is not None:\n self._close()\n self.camera = picamera.PiCamera()\n self.camera.resolution = self.resolution\n self.camera.framerate = self.framerate\n self.rawCapture = PiRGBArray(self.camera, size=self.resolution)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up the region of interest of the camera. Basler calls this the Area of Interest (AOI) in their manuals. Beware that not all cameras allow setting the ROI (especially if they are not area sensors). Both the corner positions and the width/height need to be multiples of 4. Compared to Hamamatsu, Basler provides a very ...
def set_ROI(self, X: Tuple[int, int], Y: Tuple[int, int]) -> Tuple[int, int]: width = abs(X[1]-X[0])+1 width = int(width-width%4) x_pos = int(X[0]-X[0]%4) height = int(abs(Y[1]-Y[0])+1) y_pos = int(Y[0]-Y[0]%2) logger.info(f'Updating ROI: (x, y, width, height) = ({x_pos},...
[ "def set_roi(location): # points gimbal to region of interest (ROI), global location\n # create the MAV_CMD_DO_SET_ROI command\n msg = vehicle.message_factory.command_long_encode(\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_CMD_DO_SET_ROI, # command\n 0, # confirma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the ROI to the maximum area of the camera
def clear_ROI(self): self.camera.OffsetX.SetValue(self.camera.OffsetX.Min) self.camera.OffsetY.SetValue(self.camera.OffsetY.Min) self.camera.Width.SetValue(self.camera.Width.Max) self.camera.Height.SetValue(self.camera.Height.Max)
[ "def on_roi_reset_clicked(self):\r\n self.data_limits = None\r\n self.reset_roi_fields()\r\n self.update_image(load=True)", "def set_ROI(self, X: Tuple[int, int], Y: Tuple[int, int]) -> Tuple[int, int]:\n width = abs(X[1]-X[0])+1\n width = int(width-width%4)\n x_pos = int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the current ReportContext to a newly created thread. This should be called from the new thread's parent thread, before the new thread starts.
def add_report_ctx(thread=None, ctx=None): if thread is None: thread = threading.current_thread() if ctx is None: ctx = get_report_ctx() if ctx is not None: setattr(thread, REPORT_CONTEXT_ATTR_NAME, ctx) return thread
[ "def run_in_thread(self, func, report=True):\n factory = (self.ExceptionReportingThread if report\n else ExceptionSilencingThread)\n thread = factory(target=func)\n thread.daemon = True\n thread.start()\n return thread", "def attach_to_thread(self, thread_objec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns ReportContext | None: the current thread's ReportContext, or None if it doesn't have one.
def get_report_ctx(): thread = threading.current_thread() ctx = getattr(thread, REPORT_CONTEXT_ATTR_NAME, None) if ctx is None and streamlit._is_running_with_streamlit: # Only warn about a missing ReportContext if we were started # via `streamlit run`. Otherwise, the user is likely running a...
[ "def add_report_ctx(thread=None, ctx=None):\n if thread is None:\n thread = threading.current_thread()\n if ctx is None:\n ctx = get_report_ctx()\n if ctx is not None:\n setattr(thread, REPORT_CONTEXT_ATTR_NAME, ctx)\n return thread", "def get_current_context():\n\n click_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of variables that are not written at the end of the function
def are_variables_written(function, variables_to_write): return list(set(_visit(function.entry_point, [], [], variables_to_write)))
[ "def variables(self):\r\n return []", "def free_variables(self) -> Set[str]:\n # Task 7.6.3\n vars = set()\n free_vars = set()\n self.form_extract_helper(free_vars, 4, vars)\n return free_vars", "def _remove_io_variables(variable_list):\n io_regex = re.compile(\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw stars on an image. The camera is assumed to be oriented according to R.
def draw_stars(star_db, image, R, cam_model, mag_limit=4.0): for star in (s for s in star_db if s.mag < mag_limit): vec = R.T * star.vec if vec[2, 0] > 0: coords = tuple(map(int, cam_model.vec_to_pixel(vec))) cv.Circle(image, coords, 5, cv.CV_RGB(0, 255, 0))
[ "def draw_stars(screen, stars, screen_width, screen_height):\n\tstar_height = 50\n\tstar_width = 50\n\n\t#Get number of stars x\n\tavailable_space_x = screen_width - star_width\n\tnumber_stars_x = int(available_space_x / (2 * star_width))\n\n\t# Get the number of rows y\n\tavailable_space_y = screen_height - star_h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new manufacturer
def post(self): data = request.json return save_manufacturer(data)
[ "def create_manufacturer(part_id, input_name, company_name):\n\n Company = apps.get_model('company', 'company')\n\n manufacturer = Company.objects.create(\n name=company_name,\n description=company_name,\n is_manufacturer=True\n )\n\n # Map both names to ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all manufacturers in the system
def get(self): return get_all_manufacturers()
[ "def get_all_manufacturers(\n self, manufacturer_type: Optional[str] = None, page: int = 1\n ) -> List[Manufacturer]:\n raise NotImplementedError", "def get_all_manufacturers(self,\n page,\n size):\n\n # Prepare query URL\n _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a manufacturer with a given id
def get(self, id): return get_manufacturer(id)
[ "def get_manufacturer(self,\n manufacturer_id):\n\n # Prepare query URL\n _query_builder = Configuration.base_uri.format(Configuration.host)\n _query_builder += '/medicines/manufacturers/{manufacturer_id}'\n _query_builder = APIHelper.append_url_with_template_para...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a manufacturer with a given id
def put(self, id): data = request.json return update_manufacturer(id, data)
[ "def update_device_by_id(self, device_id, **kwargs):\n return self.netbox_con.patch('/dcim/devices/', device_id, **kwargs)", "def update_device(self, id, values):\n values = values.copy()\n\n # convert imaging_server to its ID, and strip the id\n values['imaging_server_id'] = self._fin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of child objects of a folder.
def get_children_of_folderish(context): brains = api.content.find( context=context, depth=1, sort_on='getObjPositionInParent' ) results = [b.getObject() for b in brains] return results
[ "def list_children(self):\n return self._list(self.client, children_of_group=self.name)", "def _recursive_fb_folder_search( folder, children = None, skip_strings = None ):\n\n\t# Check to be sure the folder is actually of type FBFolder\n\tif not isinstance( folder, pyfbsdk.FBFolder ):\n\t\treturn [ ]\n\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }