Dataset columns:
  query      string  (length 9 to 9.05k)
  document   string  (length 10 to 222k)
  negatives  list    (length 19 to 20)
  metadata   dict
Marks a cell as safe, and updates all knowledge to mark that cell as safe as well.
def mark_safe(self, cell):
    self.safes.add(cell)
    for sentence in self.knowledge:
        sentence.mark_safe(cell)
[ "def mark_safe(self, cell):\n if cell in self.cells:\n self.cells-={cell}\n self.changed=True", "def mark_mine(self, cell):\n if cell in self.cells and self.count>0:\n self.cells-={cell}\n self.count-=1\n #flags this sentence as having been chan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds sentences that can draw conclusions, adds their mines or safes to the lists, and removes the sentence from the knowledge base
def find_conclusion_sentences(self):
    for sentence in self.knowledge:
        new_mines=sentence.known_mines()
        new_safes=sentence.known_safes()
        if len(new_mines)>0:
            for mine in new_mines:
                self.mark_mine(mine)
        elif len(new_safes)>0: ...
[ "def handle_sentence(story, story_filename, sentence_num, output_file, stats,\n filter_list, filter_stats):\n # parse the sentence\n # now it is just a matter of iterating over our items and removing them\n # one by one\n # first, get just the set we care about\n count = 0\n\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Every time new knowledge is added by add_knowledge, this method is run. It runs in a loop until there are no more changed sentences. Sentences can be changed by their own creation, or when new mines or safes are found. Every time a sentence is changed, this method tries to subtract it from the other sentences if it is ...
def subtract_subset(self):
    while True:
        # resets flag for entire METHOD.
        subset_change=False
        for sub_sentence in self.knowledge:  # runs for each SENTENCE flagged
            if sub_sentence.changed:
                sub_sentence.changed=False  # clears flag...
[ "def find_conclusion_sentences(self):\n for sentence in self.knowledge:\n new_mines=sentence.known_mines()\n new_safes=sentence.known_safes()\n if len(new_mines)>0:\n for mine in new_mines:\n self.mark_mine(mine)\n elif len(new_saf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a safe cell to choose on the Minesweeper board. The move must be known to be safe, and not already a move that has been made. This function may use the knowledge in self.mines, self.safes and self.moves_made, but should not modify any of those values.
def make_safe_move(self):
    for c in self.safes:
        if c not in self.moves_made:
            return c
[ "def get_computer_move(self):\r\n copy = self.get_board_copy()\r\n if self.ai is None: # if did not pass in a trained agent.\r\n # First, check if computer can win in the next move\r\n for i in range(1, 10):\r\n if self.is_space_free(i): # If space is free\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the syntax of the first mark is correct, i.e. that it has a space before and a space after. mark is the mark itself (for example ,;np ) with one character before and one after; line is the complete line and line_nb its number in the text (only useful on error). Returns True if no error, else raises an exception
def checkfmark(mark, line, line_nb):
    if False and mark[0] != ' ':  # False, because specs have changed
        utils.underlineall(line,mark)
        raise SyntaxError("Please put a space before opening tag in line {}".format(line_nb))
    if mark[-1] != ' ':
        utils.underlineall(line,mark)
        raise SyntaxEr...
[ "def test_line_is_rst_title_marker(self):\n self.assertEqual(line_is_rst_title_marking(''), False)\n self.assertEqual(line_is_rst_title_marking(' '), False)\n self.assertEqual(line_is_rst_title_marking(' hello world'), False)\n self.assertEqual(line_is_rst_title_marking('hello world ')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize all storage arrays based on the number of stars and targets
def initializeStorageArrays(self):
    self.DRM = []
    self.fullSpectra = np.zeros(self.SimulatedUniverse.nPlans, dtype=int)
    self.partialSpectra = np.zeros(self.SimulatedUniverse.nPlans, dtype=int)
    self.propagTimes = np.zeros(self.TargetList.nStars)*u.d
    self.lastObsTimes = np.ze...
[ "def initializeStorageArrays(self):\n\n self.DRM = []\n OS = self.OpticalSystem\n SU = self.SimulatedUniverse\n allModes = OS.observingModes\n num_char_modes = len(\n list(filter(lambda mode: \"spec\" in mode[\"inst\"][\"name\"], allModes))\n )\n self.full...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
String representation of the Survey Simulation object. When the command 'print' is used on the Survey Simulation object, this method returns the values contained in the object
def __str__(self):
    for att in self.__dict__:
        print('%s: %r' % (att, getattr(self, att)))
    return 'Survey Simulation class object attributes'
[ "def __str__(self):\n return self.simulationName", "def __repr__(self):\r\n return str(vars(self))", "def __str__(self):\n output = \"This ChoiceStruct holds the following elements:\\n\"\n for choiceObj in self.contents:\n output += str(choiceObj) + \"\\n\"\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the survey simulation
def run_sim(self):
    OS = self.OpticalSystem
    TL = self.TargetList
    SU = self.SimulatedUniverse
    Obs = self.Observatory
    TK = self.TimeKeeping
    # TODO: start using this self.currentSep
    # set occulter separation if haveOcculter
    if OS.ha...
[ "def simulate():\n tags = []\n answer_session = models.AnswerSession()\n answer_session.save()\n question = models.Question.get_best_question(answer_session)\n session['answers_id'] = str(answer_session.id)\n return render_template(\n 'simulate.html',\n q=question,\n enumerate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles fully dynamically scheduled case where OBduration is infinite and missionPortion is less than 1. Input dt is the total amount of time, including all overheads and extras used for the previous observation.
def arbitrary_time_advancement(self, dt):
    self.TimeKeeping.allocate_time(
        dt*(1. - self.TimeKeeping.missionPortion)/self.TimeKeeping.missionPortion,
        addExoplanetObsTime=False)
[ "def _flow_time_step(self, dt: float, **kwargs):\n ...", "def process_special_duration_cases(instruction):\r\n\r\n def find_pair_on_off_duration_schedules(instruction):\r\n for sched_0_index in range(len(instruction.schedules) - 1):\r\n (schedule_0, schedule_1) = instructio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds index of next target star and calculates its integration time. This method chooses the next target star index based on which stars are available, their integration time, and maximum completeness. Returns None if no target could be found.
def next_target(self, old_sInd, mode):
    OS = self.OpticalSystem
    ZL = self.ZodiacalLight
    Comp = self.Completeness
    TL = self.TargetList
    Obs = self.Observatory
    TK = self.TimeKeeping
    # create DRM
    DRM = {}
    # allocate settling...
[ "def find_closest_star():\n \n params = get_args()\n \n (reduction_metadata, star_catalog) = fetch_metadata(params)\n \n dx = star_catalog['x']-params['x']\n dy = star_catalog['y']-params['y']\n sep = np.sqrt(dx*dx + dy*dy)\n \n idx = np.where( sep == sep.min() )\n \n j = sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refines/filters/chooses occulter slews based on time constraints Refines the selection of occulter slew times by filtering based on mission time constraints and selecting the best slew time for each star. This method calls on other occulter methods within SurveySimulation depending on how slew times were calculated pri...
def refineOcculterSlews(self, old_sInd, sInds, slewTimes, obsTimes, sd, mode):
    Obs = self.Observatory
    TL = self.TargetList
    # initializing arrays
    obsTimeArray = np.zeros([TL.nStars,50])*u.d
    intTimeArray = np.zeros([TL.nStars,2])*u.d
    for...
[ "def findAllowableOcculterSlews(self, sInds, old_sInd, sd, slewTimes, obsTimeArray, intTimeArray, mode):\r\n TK = self.TimeKeeping\r\n Obs = self.Observatory\r\n TL = self.TargetList\r\n\r\n # 0. lambda function that linearly interpolates Integration Time between obsTimes\r\n lin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filters occulter slews that have already been calculated/selected. Used by the refineOcculterSlews method when slew times have been selected a priori. This method filters out slews that are not within desired observing blocks, that exceed the maximum allowed integration time, or that fall outside of future keepouts.
def filterOcculterSlews(self, sInds, slewTimes, obsTimeArray, intTimeArray, mode):
    TK = self.TimeKeeping
    Obs = self.Observatory
    # allocate settling time + overhead time
    tmpCurrentTimeAbs = TK.currentTimeAbs.copy() + Obs.settlingTime + mode['syst']['ohTime']
    tmpCu...
[ "def refineOcculterSlews(self, old_sInd, sInds, slewTimes, obsTimes, sd, mode):\r\n \r\n Obs = self.Observatory\r\n TL = self.TargetList\r\n \r\n # initializing arrays\r\n obsTimeArray = np.zeros([TL.nStars,50])*u.d\r\n intTimeArray = np.zeros([TL.nStars,2])*u.d\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds an array of allowable slew times for each star. Used by the refineOcculterSlews method when slew times have NOT been selected a priori. This method creates nx50 arrays (where the row corresponds to a specific star and the column corresponds to a future point in time relative to currentTime). These arrays are initi...
def findAllowableOcculterSlews(self, sInds, old_sInd, sd, slewTimes, obsTimeArray, intTimeArray, mode):
    TK = self.TimeKeeping
    Obs = self.Observatory
    TL = self.TargetList
    # 0. lambda function that linearly interpolates Integration Time between obsTimes
    linearInterp = lambd...
[ "def refineOcculterSlews(self, old_sInd, sInds, slewTimes, obsTimes, sd, mode):\r\n \r\n Obs = self.Observatory\r\n TL = self.TargetList\r\n \r\n # initializing arrays\r\n obsTimeArray = np.zeros([TL.nStars,50])*u.d\r\n intTimeArray = np.zeros([TL.nStars,2])*u.d\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Selects the best slew time for each star This method searches through an array of permissible slew times for each star and chooses the best slew time for the occulter based on maximizing possible characterization time for that particular star (as a default).
def chooseOcculterSlewTimes(self, sInds, slewTimes, dV, intTimes, charTimes):
    # selection criteria for each star slew
    good_j = np.argmax(charTimes,axis=1)  # maximum possible characterization time available
    good_i = np.arange(0,len(sInds))
    dV = dV[good_i,good_j]
    ...
[ "def _get_next_optimal_lunch_time(self, demands: np.ndarray, shifts: np.ndarray, current_search_depth: int = 1):\n results = []\n chosen_shifts = []\n chosen_indices = []\n\n bounds = self._get_lunch_time_bounds(shifts=shifts)\n shift_indices = list(bounds.keys())\n min_bou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines SNR and detection status for a given integration time for detection. Also updates the lastDetected and starRevisit lists.
def observation_detection(self, sInd, intTime, mode):
    PPop = self.PlanetPopulation
    Comp = self.Completeness
    OS = self.OpticalSystem
    ZL = self.ZodiacalLight
    PPro = self.PostProcessing
    TL = self.TargetList
    SU = self.SimulatedUniverse
    Obs = ...
[ "def statistics(self, yolo3_rtt, crnn_rtt, detections, recognitions):\n if not self.yolo3_rtt:\n self.yolo3_rtt = yolo3_rtt\n else:\n self.yolo3_rtt = self.yolo3_rtt * 0.98 + yolo3_rtt * 0.02\n if not self.crnn_rtt:\n self.crnn_rtt = crnn_rtt\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper method for scheduling revisits after observation detection
def scheduleRevisit(self, sInd, smin, det, pInds):
    TK = self.TimeKeeping
    TL = self.TargetList
    SU = self.SimulatedUniverse
    # in both cases (detection or false alarm), schedule a revisit
    # based on minimum separation
    Ms = TL.MsTrue[sInd]
    if smin is not None:  # s...
[ "def reschedule_visit(self):\n # get what we clicked on\n row_i = self.visit_table.currentRow()\n if row_i == -1:\n print(\"DEBUG: reschedule visit but no visit selected!\")\n return\n\n status_i = self.visit_columns.index(\"vstatus\")\n vstatus = self.visit_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the occulter wet mass in the Observatory module, and stores all the occulter related values in the DRM array.
def update_occulter_mass(self, DRM, sInd, t_int, skMode):
    TL = self.TargetList
    Obs = self.Observatory
    TK = self.TimeKeeping
    assert skMode in ('det', 'char'), "Observing mode type must be 'det' or 'char'."
    # decrement mass for station-keeping
    ...
[ "def update_obs(self):\n self._update_attr(\"obs\", axis=1)", "def _update_materials(self):\n\n for rank in range(comm.size):\n number_i = comm.bcast(self.number, root=rank)\n\n for mat in number_i.materials:\n nuclides = []\n densities = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Join all _outspec dicts from all modules into one output dict and optionally write out to JSON file on disk.
def genOutSpec(self, tofile=None):
    # start with a copy of MissionSim _outspec
    out = copy.copy(self._outspec)
    # add in all modules _outspec's
    for module in self.modules.values():
        out.update(module._outspec)
    # add in the specific modu...
[ "def _write_detected_modules(self, f, output_type, output_module_name):\n if output_type == 'kconf':\n writer = KernelConfigWriter(f)\n elif output_type == 'module':\n writer = ModuleConfigWriter(f)\n else:\n log.die(\"Invalid output_type '{}'\".format(output_ty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the cached file hash name. Requires a .XXX appended to the end of the hash name for each individual use case
def generateHashfName(self, specs):
    cachefname = ''  # declares cachefname
    mods = ['Completeness','TargetList','OpticalSystem']  # modules to look at
    tmp = self.Completeness.PlanetPopulation.__class__.__name__ + \
          self.PlanetPopulation.__class__.__name__ + \
          self.Simulate...
[ "def create_cachefile_name(key, extension):\n return reex.sub(r\"(.*/)*(.*\\.).*\", r\"__cache__\\2\" + extension, key)", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def get_hashed_filename(name, file, suffix=None):\n basename, hash, ext = split_filename(name)\n file.seek(0)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Encodes numpy arrays, astropy Times, and astropy Quantities, into JSON. Called from json.dump for types that it does not already know how to represent, like astropy Quantity's, numpy arrays, etc. The json.dump() method encodes types like integers, strings, and lists itself, so this code does not see these types. Li...
def array_encoder(obj): from astropy.time import Time from astropy.coordinates import SkyCoord if isinstance(obj, Time): # astropy Time -> time string return obj.fits # isot also makes sense here if isinstance(obj, u.quantity.Quantity): # note: it is possible to have...
[ "def array_encoder(obj):\n \n from astropy.time import Time\n from astropy.coordinates import SkyCoord\n if isinstance(obj, Time):\n # astropy Time -> time string\n return obj.fits # isot also makes sense here\n if isinstance(obj, u.quantity.Quantity):\n # note: it is possible to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if the current token type is a keyword
def keyWord_type(self):
    return bool(self.current_token in JackTokenizer.keyWords)
[ "def keyword(token, kw=None):\n return isA(token, tt=\"KEYWORD\", tv=kw)", "def is_keyword(self):\n return keyword.iskeyword(self.string)", "def is_keyword(self, *keywords):\r\n if self.token is None:\r\n self.get_next()\r\n return self.token == 'identifier' and self.text.lowe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if the current token type is a symbol
def symbol_type(self):
    return bool(self.current_token in JackTokenizer.symbols)
[ "def symbol(token, sym=None):\n return isA(token, tt=\"SYMBOL\", tv=sym)", "def isToken(symbol): \n key = symbols.get(symbol, \"NO\")\n if isinstance(key, str):\n return symbol\n else:\n return key", "def isNextSymbol(self):\r\n reg = re.compile(\"^(\\{|}|\\(|\\)|\\[|\\.|]|,|;|...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if the current token type is a string constant
def str_const_type(self):
    return bool(re.fullmatch("\".*?\"", self.current_token))  # "...."
[ "def is_string(self):\n return self.type == py_tokenize.STRING", "def IsLiteral(self) -> bool:", "def is_string(self):\n return type(self.value) == str", "def isNextString(self):\r\n reg = re.compile('^(\\\"[^\\\"]*\\\")', re.DOTALL)\r\n match = re.search(reg, self.lines)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if the current token type is an integer constant
def int_const_type(self):
    return bool(re.fullmatch("([0-9])*", self.current_token))
[ "def at_type_id(self):\n # Implement lexer hack here:\n if self.token:\n # Also implement lexer hack here:\n if self.token.typ == \"ID\" and self.token.val in self.typedefs:\n return True\n return False", "def isInteger(self, typ):\n return typ == \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if the current token type is an identifier
def identifier_type(self):
    return bool(re.fullmatch("(_|[a-z]|[A-Z])([a-z]?[A-Z]?[0-9]?_?)*", self.current_token))
[ "def at_type_id(self):\n # Implement lexer hack here:\n if self.token:\n # Also implement lexer hack here:\n if self.token.typ == \"ID\" and self.token.val in self.typedefs:\n return True\n return False", "def identifier(token, ident=None):\n return isA...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the character which is the current token. Should be called only when tokenType() is SYMBOL.
def symbol(self):
    return self.current_token
[ "def symbol(self): \n \n return self._content[self._current_command].split(' ')[0][1:]", "def current_char(self) -> str:", "def symbol(self):\n if self.command_type() == 'A_COMMAND':\n return self.next_command.split('@')[1]\n if self.command_type() == 'L_COMMAND':\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the identifier which is the current token. Should be called only when tokenType() is IDENTIFIER.
def identifier(self):
    return self.current_token
[ "def identifier(self):\n result = \"\"\n while self.current_char is not None and self.current_char.isalnum():\n result += self.current_char\n self.advance()\n return result", "def identifier(token, ident=None):\n return isA(token, tt=\"IDENTIFIER\", tv=ident)", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the integer value of the current token. Should be called only when tokenType() is INT_CONST.
def int_val(self):
    return int(self.current_token)
[ "def next_int(self):\n\t\ttoken = self.next_token()\n\t\tif token != None:\n\t\t\treturn int(token)", "def next_integer(self):\n try:\n return int(self._tokens.pop(0))\n\n except IndexError:\n raise Exception(\"Invalid request: \" + self._request)", "def transform_integer_lit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the string value of the current token, without the double quotes. Should be called only when tokenType() is STRING_CONST.
def string_val(self):
    return str(self.current_token)
[ "def stringVal(self):\r\n return self.token", "def token(self):\n return self.cst_value if self.is_terminal else None", "def getToken(self) -> str:\n return self.__newToken", "def dump_token(self, token, verbose=False):\n if isinstance(token, str):\n return token\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the equality constraints for power (from BTHETA approximation) in the branch as a bigM
def declare_eq_branch_power_btheta_approx_bigM(model, index_set, branches):
    m = model
    con_set = decl.declare_set("_con_eq_branch_power_btheta_approx_bigM_set", model, index_set)
    m.eq_pf_branch_ub = pe.Constraint(con_set)
    m.eq_pf_branch_lb = pe.Constraint(con_set)
    for branch_name in con_set:
        ...
[ "def declare_eq_branch_power_btheta_approx_nonlin(model, index_set, branches):\n m = model\n\n con_set = decl.declare_set(\"_con_eq_branch_power_btheta_approx_bigM_set\", model, index_set)\n\n m.eq_pf_branch_ub = pe.Constraint(con_set)\n m.eq_pf_branch_lb = pe.Constraint(con_set)\n for branch_name in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the equality constraints for power (from BTHETA approximation) in the branch as a bigM
def declare_eq_branch_power_btheta_approx_nonlin(model, index_set, branches):
    m = model
    con_set = decl.declare_set("_con_eq_branch_power_btheta_approx_bigM_set", model, index_set)
    m.eq_pf_branch_ub = pe.Constraint(con_set)
    m.eq_pf_branch_lb = pe.Constraint(con_set)
    for branch_name in con_set:
        ...
[ "def declare_eq_branch_power_btheta_approx_bigM(model, index_set, branches):\n m = model\n\n con_set = decl.declare_set(\"_con_eq_branch_power_btheta_approx_bigM_set\", model, index_set)\n\n m.eq_pf_branch_ub = pe.Constraint(con_set)\n m.eq_pf_branch_lb = pe.Constraint(con_set)\n for branch_name in c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take 2 perfectly aligned images and find the differences using structural similarity. Return img_a with rectangular contours at the difference positions. n_diff is the maximum number of differences expected.
def find_differences(img_a, img_b, tresh_quantile=0.95, ssim=True, n_diff=15):
    #
    # 1. blurring
    #
    # kernel 2% of the image size
    kernel_size = int(img_a.shape[1]/50)
    # must be odd if median
    kernel_size += kernel_size%2-1
    img_a_blurred = cv2.GaussianBlur( ...
[ "def compareImageAgainstAnotherImageGetScore_Features(img1, img2, flag_debug):\n\n # parameters\n filterMatchRatio = 0.75\n\n\n # create a detector and matcher object\n detector, matcher = createDetectorMatcher()\n\n # error if no descriptors were created for either image\n features1, descriptors1 = (detector...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take a binary image and return the n largest components. If the number of components is smaller, return all components. If remove_borders is set, it will remove all components that span at least half the image width or height. stats array: cv2.CC_STAT_LEFT The leftmost (x) coordinate which is the inclusive start of the bounding ...
def largest_components(binary_img, n, remove_borders=True):
    # detect connected components
    retval, labels, stats, centroids = \
        cv2.connectedComponentsWithStats(binary_img)
    if remove_borders:
        img_w, img_h = binary_img.shape
    components = []
    for i, stat in enumerate(...
[ "def extract_biggest_connected_component(mask: np.ndarray) -> np.ndarray:\n # extract all connected components\n num_labels, labels_im = cv2.connectedComponents(mask.astype(np.uint8))\n \n # we find and return only the biggest one\n max_val, max_idx = 0, -1\n for i in range(1, num_labels):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove host from cluster.
def rm_host(self, host, is_master):
    self.hosts.pop(host)
[ "def remove(self, host, reuseConnection = None):\n # TODO: Implement this.\n raise Exception( \"Not implemented\" )", "def host_cluster_delete(context, cluster_id, host_name):\n # If we weren't given a session, then we need to create a new one\n session = nova_db_sa_api.get_session()\n # Cr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return current available hosts.
def get_hosts(self):
    return self.hosts
[ "def getHosts(self):\n req = requests.get(self.baseURL+\"host\", headers=self.header)\n return req.json()['hosts']", "def allowed_hosts(self):\n return self.__allowed_hosts", "def hosts(self) -> Optional[Sequence['outputs.DedicatedHostInstanceViewWithNameResponse']]:\n return pulumi....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the default parameter name. If the parameter name is not already set, the default parameter name will be used.
def set_default_parameter_name(self, name):
    if 'parameter_name' not in self.attr:
        self.attr['parameter_name'] = name
[ "def name(self, name_):\n if name_ is None:\n name_ = PARAMETER_NAME_DEFAULT\n elif isinstance(name_, str):\n name_ = name_.strip()\n if name_ == '':\n name_ = PARAMETER_NAME_DEFAULT\n if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test slack notification on main channel
def test_slack():
    import requests
    from config import SLACK_WEBHOOKS
    message = SlackNewAPIMessage("0xTEST", "MyAPI", "An API.", "tester")
    response = requests.post(SLACK_WEBHOOKS[0]["webhook"], json=message.compose())
    print(response.status_code)
    print(response.text)
[ "def test_metagov_slack_trigger(self):\n # 1) Create Policy that is triggered by a metagov action\n policy = Policy(kind=Policy.PLATFORM)\n policy.community = self.slack_community\n policy.filter = \"\"\"return action.action_codename == 'slackpinmessage'\"\"\"\n policy.initialize ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sort candidates based on language model score
def __sort_candidate(self, candidates, tokens, pos, penalty_func):
    candidates_score_pair = []
    candidates_sentence = []
    for c in candidates:
        cand_tokens = tokens[:]
        cand_tokens[pos] = c
        candidates_sentence.append([c, cand_tokens[:]])
    if not self.batch_sc...
[ "def sort_countries_by_relevance(self):\n ### TODO this code would not work anymore\n #score = {}\n #for country in self.country_select.options:\n # score[country] = self.adfCountryData[country].infection_rate_7.values[-1]\n #score_sorted = {k: v for k, v in sorted(score.items(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the given leafs to the server.
def add_leafs(self, leafs: List[Tuple["Controller", str]]) -> None:
    for controller, leaf in leafs:
        self._master.add_leaf(leaf, controller)
[ "def f_add_leaf(self, *args, **kwargs):\n\n return self._nn_interface._add_generic(self, type_name=LEAF,\n group_type_name=GROUP,\n args=args, kwargs=kwargs,\n add_prefix=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the es aggregation result. Override this method for result customization
def aggregation_result(self, aggregation_result: Dict[str, Any]) -> Dict[str, Any]:
    return aggregation_result.get(self.id, aggregation_result)
[ "def get_aggregations(self, slot):", "def file_as_aggregate_from_solr_search(solr_search_result):\n\n search_results = copy.deepcopy(solr_search_result)\n for doc in search_results['response']['docs']:\n for attr in doc:\n if attr == 'type':\n continue\n if not ha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements the common mechanism to define aggregations with nested fields
def _build_aggregation(self, *args, **kwargs) -> Dict[str, Any]:
    return aggregations.nested_aggregation(
        nested_path=self.nested_path,
        inner_aggregation=self._inner_aggregation(*args, **kwargs),
    )
[ "def get_aggregations(self, slot):", "def test_aggregation(self):\n for aggregate in (Sum, Avg, Variance, StdDev):\n with self.assertRaises(NotSupportedError):\n Item.objects.aggregate(aggregate(\"time\"))\n with self.assertRaises(NotSupportedError):\n It...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare two ids and return the differences. id_1 is the first id, id_2 the second id. Returns a list of differences between id_1 and id_2, formatted as tuples of the index of the difference, the letter in id_1, and the letter in id_2
def compare_ids(id_1, id_2):
    differences = []
    for i in range(len(id_1)):
        if id_1[i] != id_2[i]:
            differences.append((i, id_1[i], id_2[i]))
    return differences
[ "def find_pair_differs_by_one_char(box_ids):\n for str1, str2 in itertools.combinations(box_ids, r=2):\n difference_result = differs_by_one_char_same_len(str1, str2)\n if difference_result != -1:\n return (difference_result, (str1, str2))\n return None", "def find_two_similar_boxes(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the close pair of ids with only 1 difference. Takes a list of ids and returns a tuple of 2 ids which have only one letter different
def find_correct_box_ids(id_list):
    for i in id_list:
        for j in id_list:
            compare_result = compare_ids(i, j)
            if len(compare_result) == 1:
                return (i, j)
[ "def find_pair_differs_by_one_char(box_ids):\n for str1, str2 in itertools.combinations(box_ids, r=2):\n difference_result = differs_by_one_char_same_len(str1, str2)\n if difference_result != -1:\n return (difference_result, (str1, str2))\n return None", "def find_similar_pairs(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> common_letters(['abcde', 'fghij', 'klmno', 'pqrst', 'fguij', 'axcye', 'wvxyz']) 'fgij'
def common_letters(id_list):
    pair = find_correct_box_ids(id_list)
    pair_difference = compare_ids(pair[0], pair[1])[0]
    char_list = list(pair[1])
    char_list.pop(pair_difference[0])
    return "".join(char_list)
[ "def common_charecters(string1, string2):\n\n first_String= string1.lower()\n second_String= string2.lower()\n\n common = []\n\n for charecter in first_String:\n if charecter in second_String:\n common.append(charecter)\n else:\n None\n\n print(\"Common letters: {}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new image object using the given blender image. Returns the created image object.
def create_img_from_blender_image(blender_image):
    if blender_image is None:
        return None
    return create_img_from_pixels(blender_image.size[0], blender_image.size[1], blender_image.pixels[:])
[ "def create(width,height,depth):\r\n\r\n # Creating the image.\r\n im = mambaCore.MB_Image()\r\n err = mambaCore.MB_Create(im,width,height,depth)\r\n raiseExceptionOnError(err)\r\n \r\n return im", "def New(*args, **kargs):\n obj = itkImageB2.__New_orig__()\n from itk.support impor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines how many days have elapsed between the horse's last race and the reference date. Returns 0 if this is its first race.
def get_days_since_last_race(self, reference_date: date) -> int:
    len_races = len(self.races)
    # Base case 1: No previous races -> return 0
    if len_races == 0:
        return 0
    # Base case 2: reference_date is before the earliest race on record:
    if reference_date < self.races[...
[ "def lifespan(self, date):\n if not hasattr(self, '_lifespan'):\n self._lifespan = (date - self.created.date()).days\n if self._lifespan < 0:\n return 0\n return self._lifespan", "def lead_time(self):\n diff = self.ended['entered_at'] - self.committed['ent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes the encost file into a Python structure, which is a list of dicts
def parse_encost(fpath: Path) -> Tuple[List[dict], int]:
    fname = fpath.with_suffix("").name
    year = int(f"20{fname.replace('encost', '')}")
    print(f"Start parsing {fpath=}, {year=}")
    with open(fpath, "r") as f:
        content = f.read()
    lines = [x.strip() for x in content.splitlines()]
    region ...
[ "def parse_file(self, f):\n final_output = {}\n for line in f:\n output = self.line(line)\n self.merge_output(final_output, output)\n return final_output", "def data2addict(fname):\n\n adj = {}\n\n try:\n with open(fname, 'r') as data:\n for line ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
skip event if nev_list is not empty and iev is not in the list
def skip_event(iev, nev_list):
    if len(nev_list) == 0:
        skip = False
    else:
        if (iev in nev_list):
            skip = False
        else:
            skip = True
    return skip
[ "def skip(self):\n if self._check_notified():\n return\n self.result_synchronizer.notify((\"skip\",None))", "def eventcheckin():", "def skip_unless_is_ovn():\n return skip_unless_missing_networking_agents(OPENVSWITCH_AGENT)", "def OnIgnoreAll(self, evt):\r\n self._checker.ig...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compress a UTF8 string in a way safe for passage as an argument through fork/exec, but not necessarily shells
def compress_string(string: str) -> str:
    from zlib import compress
    from base64 import b64encode
    string_bytes = string.encode('utf-8')
    debug('initial string is {} bytes in size'.format(len(string_bytes)))
    string_compressed = compress(string_bytes)
    debug('string is {} bytes in size after compress...
[ "def compress_string(uncompressed_string):\n bytes_buffer = BytesIO()\n with GzipFile(mode='wb', fileobj=bytes_buffer) as f:\n f.write(uncompressed_string.encode('utf-8', errors='ignore'))\n return bytes_buffer.getvalue()", "def canonical_string_encoder(string):\n\n string = '\"%s\"' % re.sub(r'(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checks that an ExtrudeMixedShape can be created with a combination of straight and circular connections
def test_mixed_shape_with_straight_and_circle(self):
    test_shape = ExtrudeMixedShape(
        points=[
            (10, 20, "straight"),
            (10, 10, "straight"),
            (20, 10, "circle"),
            (22, 15, "circle"),
            (20, 20, "straight"),
        ],
        ...
[ "def test_conditional_solid_reconstruction(self):\n\n test_shape = ExtrudeStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n distance=20\n )\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n initial_hash_value = test_sha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates an ExtrudeMixedShape and checks that an stp file of the shape can be exported using the export_stp method
def test_export_stp(self):
    test_shape = ExtrudeMixedShape(
        points=[
            (10, 20, "straight"),
            (10, 10, "straight"),
            (20, 10, "circle"),
            (22, 15, "circle"),
            (20, 20, "straight"),
        ],
        distance=10,
        ...
[ "def test_export_stl(self):\n\n test_shape = ExtrudeMixedShape(\n points=[\n (10, 20, \"straight\"),\n (10, 10, \"straight\"),\n (20, 10, \"circle\"),\n (22, 15, \"circle\"),\n (20, 20, \"straight\"),\n ],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates an ExtrudeMixedShape and checks that an stl file of the shape can be exported using the export_stl method
def test_export_stl(self):
    test_shape = ExtrudeMixedShape(
        points=[
            (10, 20, "straight"),
            (10, 10, "straight"),
            (20, 10, "circle"),
            (22, 15, "circle"),
            (20, 20, "straight"),
        ],
        distance=10,
        ...
[ "def test_export_stp(self):\n\n test_shape = ExtrudeMixedShape(\n points=[\n (10, 20, \"straight\"),\n (10, 10, \"straight\"),\n (20, 10, \"circle\"),\n (22, 15, \"circle\"),\n (20, 20, \"straight\"),\n ],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A command which returns some number of catfacts
def catfact(self, mess, args):
    try:
        number = max(1, min(self.config['MAX_FACTS'], int(args))) if args else None
        facts = self.get_catfacts(number)
        for ii in facts:
            yield ii
    except Exception:
        pass
[ "async def random_fact_cat(self, ctx):\n if command := ctx.bot.get_command(\"random cat fact\"):\n await ctx.invoke(command)\n else:\n raise RuntimeError(\n \"random cat fact command not found \"\n \"when random fact cat command invoked\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triggers a catfact in the configured channel
def catfact_trigger(self, mess, args):
    if 'FACT_CHANNEL' in self.config and self.build_identifier(self.config['FACT_CHANNEL']):
        self.random_fact()
[ "def update_channel(self, channel):", "def channelJoined(self, channel):", "async def channel(self, ctx, channel: discord.TextChannel = None):\n if channel is None:\n channel_id = ctx.cog_config['channel_id']\n if channel_id is None:\n await ctx.send(\"I'm not posting...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Activates the plugin, enabling the random facts poller if the period in seconds is positive
def activate(self):
    super(Catfacts, self).activate()
    if self.config['FACT_PERIOD_S'] > 0:
        self.start_poller(self.config['FACT_PERIOD_S'], self.random_fact)
[ "def autospawnRandom(self, dt):\n if not self.paused:\n choice = random.randint(0, 1)\n if choice:\n self.spawnMob(\"Q\", free=True)\n else:\n self.spawnMob(\"E\", free=True)", "def randomise(self):\n self.timer = self.period * random.ra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return set of canonicalized entities to add to vocabulary
def get_canonicalized_entities(entities):
    canonicalized = set()
    for name, values in entities.items():
        for v in values:
            canonicalized.add("({0}*{1})".format(name, v))
    return canonicalized
[ "def preprocess_sentences(sentences, vocab):\n # Add sentence boundaries, canonicalize, and handle unknowns\n words = flatten([\"<s>\"] + s + [\"</s>\"] for s in sentences)\n words = [canonicalize_word(w, wordset=vocab.word_to_id)\n for w in words]\n return np.array(vocab.words_to_ids(words)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process API results, extracting restaurant information, and return tuples of restaurant info
def process_api_results(api_results):
    restaurant_info = collections.defaultdict(dict)
    for idx, result in enumerate(api_results):
        values = result.split(" ")
        # Populate dict of restaurant
        restaurant_info[values[0]]['name'] = values[0]
        restaurant_info[values[0]][values[1]] = value...
[ "def getReviews(self, res_name, entity_id = 0, entity_type = \"\"):\n self.logger.info(\"Restaurant review for : %s\", res_name)\n res_review = []\n res_id = 0\n if entity_id == 0 and not entity_type:\n zomato_url = \"https://developers.zomato.com/api/v2.1/search?q=\"+res_name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save dict mapping from dialogue number to set of potential candidates in dialogue
def get_dialogue_restr(dialogue_file, db):
    c = sqlite3.connect(db)
    curs = c.cursor()
    with open(dialogue_file, "r") as f:
        dialogues = pickle.load(f)
    dial_to_rests = collections.defaultdict(set)
    # Get restr. candidates from api_calls
    for idx, dial in enumerate(dialogues):
        dial =...
[ "def _execute(self):\n violations = defaultdict(list)\n viable_candidates = set(self.candidates)\n inputs = (self.root, self.residue)\n\n for constraint in self.ranked_constraints:\n min_violation = min(constraint(candidate, inputs)\n for candida...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test module intqrt.py by downloading intqrt.csv and testing that the shape of the extracted data is 124 rows and 23 columns
def test_intqrt():
    test_path = tempfile.mkdtemp()
    x_train, metadata = intqrt(test_path)
    try:
        assert x_train.shape == (124, 23)
    except:
        shutil.rmtree(test_path)
        raise
[ "def test_read_nshort(self):\n\t\t# NUMBER_OF_VALUES = 1889\n\t\t# NUMBER_OF_KEYS = 10\n\t\t# key, value = csv.reader(open(os.path.join(data_path,\n\t\t# \t\"Neighborhoods-Short.csv\")))\n\n\t\t# self.assertEqual(NUMBER_OF_VALUES,)\n\t\t\n\t\t\n\t\t#data type\n\t\t#dataframe dimensions", "def thd_reader(filename)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fixed body adapted to us.
def createFixedBody(ra, dec, epoch):
    fixedBody = ephem.FixedBody()
    fixedBody._ra = ra
    fixedBody._dec = dec
    fixedBody._epoch = epoch
    return fixedBody
[ "def test_fixed_body_constraint_armature(self):\n # Create robot with freeflyer, set rotor inertia.\n robot = load_urdf_default(\n self.urdf_name, self.motors_names, has_freeflyer=True)\n\n # Enable rotor inertia\n J = 0.1\n motor_options = robot.get_motors_options()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine foes that can be hit in this turn. Return a list of foes and correct angles
def visible_targets(self):
    loc = self.loc
    obs = self.observation
    visible_targets = {}
    max_turn = self.settings.max_turn
    agent_radius = 7
    # Foes are possible targets if in range and in turnrange
    foes_in_range = [(foe, angles_plus_dist(loc, foe, agent_radiu...
[ "def detect_far_armor(self, far_cnts):\n armors = []\n for far_cnt in far_cnts:\n rect = cv2.minAreaRect(far_cnt)\n armor = PotentialArmor(rect=rect)\n if not armor.check_validity(self.img):\n continue\n armors.append(armor)\n if SH...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yield indices of all wall corners and the type.
def get_corners(walls, tilesize=16):
    corners = []
    for i in xrange(len(walls) - 1):
        for j in xrange(len(walls[0]) - 1):
            a = walls[i][j]
            b = walls[i + 1][j]
            c = walls[i][j + 1]
            d = walls[i + 1][j + 1]
            if a + b + c + d == 1:
                ...
[ "def get_walls_positions(self):\n walls_positions = []\n for y, line in enumerate(self.lines_list):\n for x, char in enumerate(line):\n if char == \"W\":\n walls_positions.append((x, y))\n return walls_positions", "def walls(x, y, width, height):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_makeUnmergedDataset_ Call this method if you want to make unmerged datasets. Default is to not make unmerged datasets
def makeUnmergedDataset(self):
    self.unmergedDataset = True
[ "def create_data():\n\n filtered_uk = __filter_uk_data()\n filtered_il = __filter_il_data()\n merged_df = __merge_df(df_uk=filtered_uk, df_il=filtered_il)\n\n return merged_df", "def build_synthetic_dataset(self):\n pass", "def merge(self, dataset):\n self.__dataset.update(**dataset)", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_setRunNumber_ Temp hack used to generate LFNs for conversion
def setRunNumber(self, runNumber):
    self.runNumber = runNumber
[ "def set_run_number(self, run_number):\n self._run_number = run_number", "def set_num_epoch(self, num_epoch):\n self.num_epoch = num_epoch", "def number_datafile(run_number, prefix=\"PLP\"):\n try:\n num = abs(int(run_number))\n # you got given a run number\n return \"{0}{1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_makeTier0LFN_ Generate an LFN for this workflow
def makeTier0LFN(self):
    # //
    # // Remove stream name from primary dataset name
    #//
    primaryDataset = self.inputDataset['Primary']
    primaryDatasetElements = primaryDataset.rsplit("-",1)
    if ( len(primaryDatasetElements) > 1 ):
        datasetName = primaryDatasetElement...
[ "def _create_train_loop_fn(train_step_fn, options: StandardTrainerOptions):\n if options.use_tf_while_loop:\n loop_fn = loop_fns.create_tf_while_loop_fn(train_step_fn)\n if options.use_tpu_summary_optimization:\n loop_fn = loop_fns.LoopFnWithSummaries(loop_fn)\n else:\n loop_fn = tf.function(loo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_Validate_ Private method to test all options are set. Throws a WorkflowMakerError if any problems found
def _Validate(self):
    WorkflowMaker._Validate(self)
    if self.runNumber == None:
        msg = "runNumber Attribute Not Set"
        raise WorkflowMakerError(msg)
    return
[ "def clean_and_validate_options(self):\n pass", "def ValidateOptions(self, opt, args):", "def validate_settings(self):\n pass", "def check_required(self):\n die = False\n for key, value in self.spec.items():\n if not getattr(self, key.upper()) and value['required']:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an appropriate iterator for ``record_type``.
def _get_resource_iterator(record_type, credentials, sub_index, sub, tenant):
    sub_id = sub.get('subscription_id')
    if record_type == 'virtual_machine':
        client = ComputeManagementClient(credentials, sub_id)
        return client.virtual_machines.list_all()
    if record_type =...
[ "def get_events_by_record_type(self, event_record_type):\n return # osid.calendaring.EventList", "def gen_record_item(record: RecordType):\n raise NotImplementedError", "def get_recurring_events_by_record_type(self, recurring_event_record_type):\n return # osid.calendaring.RecurringEventL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split a network security group (NSG) into multiple firewall rules. An Azure NSG record contains a toplevel key named ``security_rules`` whose value is a list of security rules. In order to make it easier to write event plugins to detect security issues in an NSG, we generate a new firewall rule record for each security...
def _get_normalized_firewall_rules(nsg_record, sub_index, sub, tenant):
    security_rules = nsg_record.get('raw', {}).get('security_rules')
    nsg_name = nsg_record.get('raw', {}).get('name')
    if security_rules is None:
        _log.warning('Found NSG without security_rules; name: %s; %s', ns...
[ "def create_security_group_rules(self, data):\n return self._bulk_create(_security_group_rule.SecurityGroupRule, data)", "def enforce_security_groups_rules(self) -> None:\n sagsnl_sg = self.get_security_group(SwiftComponents.SAGSNL + \"SG\")\n rds_sg = self.get_security_group(\"RDSSG\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize records of type `rdbms`.
def _get_normalized_rdbms_record(rdbms_record):
    ssl_enforcement = rdbms_record.get('raw', {}).get('ssl_enforcement')
    ssl_connection_enabled = (ssl_enforcement == 'Enabled')
    normalized_rdbms_record = {
        'raw': rdbms_record.get('raw', {}),
        'ext': util.merge_dicts(rdbms_record.get('ext'), {
            ...
[ "def convert_records(self):\r\n\t\tcn = 0\r\n\t\tfor o in self.coll.find():\r\n\t\t\tcn += 1\r\n\t\t\tif cn % 1000 == 0: print cn\r\n\t\t\tname = o['text']\r\n\t\t\tlines = o['text'].splitlines()\r\n\t\t\to['lines'] = []\r\n\t\t\tn = 0\r\n\t\t\tfor l in lines:\r\n\t\t\t\tn += 1\r\n\t\t\t\tl = l.strip().replace(u'\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse NIWA's shitty Hydro format to get some useful information and filenames out of it. If they ever change the ordering this will need to be redone as a lot is hard coded
def parse_niwa_file(fName):
    # Lake is identified in the first row
    # What kind of storage it is is in the fifth
    with open(fName, 'rb') as f:
        lake = f.readline().split(' ')[0]
        for i in xrange(0,3):
            f.readline()
        column_info = f.readline().split(',')[-2].strip().split(' ')[-1...
[ "def readWoudc(a):\n print('filename',a)\n o = {}\n OBSERVATIONSNames = []\n #open file and read in lines.\n with open(a) as f:\n lines = f.readlines()\n # generate dictionary of dictonaries based on # as headers, and immediate lines as datasets.\n endLine = -1\n for i,l in enumerate(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enumerate processes from session structures
def check_sessions(self, addr_space):
    ret = dict()
    for session in self.session_spaces(addr_space):
        for process in session.processes():
            ret[process.obj_vm.vtop(process.obj_offset)] = process
    return ret
[ "def processes(self):\n ret = self._get_attr(\"processes\")\n return [IGuestProcess(a) for a in ret]", "def findChildProcesses(pid):\n procs={}\n procs=findChildProcessnames(pid)\n\n result=[]\n\n for thing in procs.keys():\n result.append(thing)\n\n return result", "def getPr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enumerate processes from desktop threads
def check_desktop_thread(self, addr_space):
    ret = dict()
    for windowstation in windowstations.WndScan(self._config).calculate():
        for desktop in windowstation.desktops():
            for thread in desktop.threads():
                process = thread.ppi.Process.dereference()
                ...
[ "def enumerate_windows():\n import ctypes.wintypes\n monitors = []\n\n def callback(_monitor, _dc, rect, _data):\n \"\"\"\n Callback for the ctypes EnumDisplayMonitors win32 function.\n \"\"\"\n rct = rect.contents\n monitors.append(Monitor(\n rct.left,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enumerate processes by walking the PspCidTable
def check_pspcid(self, addr_space):
    ret = dict()
    # Follow the pointers to the table base
    kdbg = tasks.get_kdbg(addr_space)
    PspCidTable = kdbg.PspCidTable.dereference().dereference()
    # Walk the handle table
    for handle in PspCidTable.handles():
        if handle.get_o...
[ "def findChildProcesses(pid):\n procs={}\n procs=findChildProcessnames(pid)\n\n result=[]\n\n for thing in procs.keys():\n result.append(thing)\n\n return result", "def get_all_processes_pids():\r\n h = CreateToolhelp32Snapshot()\r\n parents = {}\r\n pe = Process32First(h)\r\n whi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enumerate processes using the csrss.exe handle table
def check_csrss_handles(self, all_tasks):
    ret = dict()
    for p in all_tasks:
        if str(p.ImageFileName).lower() == "csrss.exe":
            # Gather the handles to process objects
            for handle in p.ObjectTable.handles():
                if handle.get_object_type() == "Proce...
[ "def check_pspcid(self, addr_space):\n ret = dict()\n\n # Follow the pointers to the table base\n kdbg = tasks.get_kdbg(addr_space)\n PspCidTable = kdbg.PspCidTable.dereference().dereference()\n\n # Walk the handle table\n for handle in PspCidTable.handles():\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate quarter dates from 2015 to current date
def generate_dates():
    current_year = datetime.now().year
    current_date = datetime.now().strftime('%m%d')
    years = range(2015, current_year)
    quarters = ["0331", "0630", "0930", "1231"]
    all_dates = []
    for r in itertools.product(years, quarters):
        all_dates.append(str(r[0]) + r[1])
    for q i...
[ "def get_quarter(date):\n return 1+(date.month-1)//3", "def test_quarter_to_dates():\n # Test quarter that has dates in the same year\n start, end = quarter_to_dates('Q2/2017')\n assert start == '01/01/2017'\n assert end == '03/31/2017'\n\n # Test quarter that has dates in the previous year\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Data initialization (update) for all institutions in the data config file
def init_data():
    data_info = load_data_config_file()
    reports, institutions = data_info["reports"], data_info["institutions"].keys()
    csv_file_info = get_preprocess_data(reports, institutions, mode='w')
    return update_data_config_file(csv_file_info)
[ "def _initCommonData(self, log, configFile):\n\t\tif configFile is not None:\n\t\t\twith open(configFile, 'r') as f:\n\t\t\t\tself._commonData = json.loads(f.read())\n\t\telse:\n\t\t\tself._commonData['id'] = random.randrange(255)\n\t\t\tself._commonData['location'] = [0,0,0]\n\t\t\tself._commonData['startRadios'] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A numerical approximation to the Hessian matrix of the cost function at location x0 (hopefully, the minimum)
def hessian(x0, calculate_cost_function, epsilon=1.e-5, linear_approx=False, *args):
    # ``calculate_cost_function`` is the cost function implementation
    # The next line calculates an approximation to the first
    # derivative
    f1 = approx_fprime(x0, calculate_cost_function, epsilon, *args)
    # This is ...
[ "def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n return hessian_approximation(self.f, x)", "def InvHessian(self,x):\n return linalg.inv(self.besthessian(x))", "def getHessian(fgradient):\n def hess(x):\n return evaluateHessian(fgradient,x)\n return hess", "def default_hess...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sumdiv(n) Return the sum of the divisors of n, an integer.
def sumdiv(n):
    if n == 0:
        return n
    tot = 1
    count = 0
    for p in primes:
        while n % p == 0:
            count += 1
            n /= p
        if count:
            tot *= (p**(count+1) - 1)/(p-1)
            count = 0
        if n == 1:
            break
    return tot
[ "def get_sum_divisors(n):\n ret = 1\n for x in range(2, int(math.sqrt(n))+1):\n if n % x == 0:\n ret += x\n if n/x != x:\n ret += n/x\n return ret", "def get_sum_of_divisors(number: int) -> int:\n return sum(divisors(number))", "def sum_divisors(n):\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the ship by 'step' (Vector2)
def move(self, step):
    self.position += step * self.speed
[ "def move(self):\n self.steps += 1\n direction = uniform(0, 1)\n if direction < 0.5:\n self.position -= 1\n else:\n self.position += 1", "def step(self):\n tmp = self.path[-1].copy()\n tmp += self.direction\n self.path.append(tmp)\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for a person
def search_person(body):  # noqa: E501
    if connexion.request.is_json:
        body = PersonQuery.from_dict(connexion.request.get_json())  # noqa: E501
        return dict(results=data_access.search_persons(body))
    return dict(results=[])
[ "def person_search():\n\n # Filter to just Volunteers\n s3.filter = FS(\"human_resource.type\") == 2\n\n # Only allow use in the search_ac method\n s3.prep = lambda r: r.method == \"search_ac\"\n\n return crud_controller(\"pr\", \"person\")", "def search_for_people(string):\n list = Member.objec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is person search supported
def search_person_supported():  # noqa: E501
    print(session.get("person_search"))
    if session.get("person_search", True):
        return "OK", 200
    else:
        return 'Not Implemented', 501
[ "def supports_parameter_search(self):\n return # boolean", "def person_search():\n\n # Filter to just Volunteers\n s3.filter = FS(\"human_resource.type\") == 2\n\n # Only allow use in the search_ac method\n s3.prep = lambda r: r.method == \"search_ac\"\n\n return crud_controller(\"pr\", \"p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the max depth can be reached from location (loc). Stop the search at max depth of 6 on grounds of efficiency.
def get_max_depth(game, loc):
    # Save the locations that are not reachable or were visited before
    visited = game._board_state[0:game.height * game.width]
    # The search is performed by a depth-first search recursive algorithm
    # 1 is subtracted from result since current location is depth 0
    return _get_m...
[ "def getMaxDepth(self):\n return self.getOrDefault(self.maxDepth)", "def max_depth():\n return ctoast.timing_manager_max_depth()", "def max_path_depth(self) -> ConfigNodePropertyInteger:\n return self._max_path_depth", "def bfs_max_depth_heuristic(game, player):\n def _max_depth(p):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get distances from location (loc) to every position on the board. The function is implemented using breadth-first search.
def get_distances(game, loc):
    blanks = game.get_blank_spaces()
    # Initialize all distances with max possible distance
    distances = [float("inf") for i in range(game.height * game.width)]
    row, col = loc
    queue = [(row, col)]
    # Initial location is at 0 distance
    distances[row + col * game.height] ...
[ "def _compute_distance(self) -> np.ndarray:\n loc = np.expand_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n self.loc_diff = m-m.transpose(0, 2, 1)\n return np.linalg.norm(self.loc_diff, axis=0)", "def _precompute_distances(self, state):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run an experiment in train/val mode.
def main(cfg, mode):
    experiment = ExperimentLoop(cfg, mode)
    if mode == TRAIN:
        experiment.train()
    elif mode == VAL:
        experiment.validation()
[ "def do_training():\n train_cls = Train()\n train_cls.run()", "def testModelFnTrainModeExecute(self):\n self.params['encoder_type'] = FLAGS.encoder_type\n train_input_fn = launcher.InputFn(FLAGS.training_data_filepattern,\n FLAGS.batch_size)\n eval_input_fn = la...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Context manager that creates and deletes TF_Buffer.
def tf_buffer():
    buf = c_api.TF_NewBuffer()
    try:
        yield buf
    finally:
        c_api.TF_DeleteBuffer(buf)
[ "def test_context_gc(ctx_new):\n ctx = ctx_new\n ctx.gc_mode = \"context_gc\"\n\n # Buffer\n buff = ctx.buffer(reserve=1024)\n buff = None\n assert ctx.gc() == 1\n\n # Texture\n tex = ctx.texture((10, 10), 4)\n tex = None\n assert ctx.gc() == 1\n\n # Texture Array\n tex_array = c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function receives a path to a file containing words, and an index that points to the position of a word in the file; that word is the secret word to guess.
def choose_word(file_path, index):
    with open(file_path, 'r') as words:  # Open file entered by user in read mode only.
        words_one_string = words.read()  # Return all file content as one string, and assign to parameter 'words_one_string'.
        splited_words = words_one_stri...
[ "def choose_word(file_path):\r\n # Ask user for index position, a counting number.\r\n word_index = get_counting_number()\r\n # Open file and close automatically.\r\n with open(r\"%s\" % file_path, 'r') as input_file:\r\n input_str = input_file.read()\r\n # Create list from string split by...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function checks whether the player managed to guess the secret word and thus won the game!
def check_win(secret_word, old_letters_guessed):
    check_win_word = show_hidden_word(secret_word, old_letters_guessed)
    # Call the function 'show_hidden_word' to assign the current string of letters and spaces
    # (and underscores if has) ...
[ "def checkCorrect(word, guessedLetters):\n #return True or False", "def guess_word(self):\n guessed_word = input('Enter the whole word.')\n if guessed_word == self.random_word:\n print(\"You Guessed it!\")\n print('The word is: \"{}\"!\\n'.format(self.random_word))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the inactive subscription
def api_my_subscription_inactive_delete(self, gogoro_sess=None, csrf_token=None):
    self.init.authHeader(gogoro_sess, csrf_token)
    resp = self.init.request('delete', "/api/my/subscription/inactive")
    return resp
[ "def removeSubscription(subscriber):", "def subscription_deleted(self):\n self._update(\"subscription_status\", \"deleted\")\n self._update(\"is_paying\", False)\n send_email(self, EmailTemplateNames.SUBSCRIPTION_DELETED,\n render_params={\n \"payment_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start a test database in a docker container.
def start_test_database(
        repo_path, project_name,
        host=DEFAULT_TEST_DB_HOST, port=DEFAULT_TEST_DB_PORT):
    # TODO (nb): add a check to ensure that 'port' is free
    # host is always localhost because we are running it in a local Docker container
    if host != 'localhost':
        raise NotImplementedError('...
[ "def start_db(db_pass):\n with settings(warn_only=True):\n run(f'docker run -d --name {db_container_name} --net {network_name} '\n f'-v {db_volume}:/var/lib/postgresql/data '\n f'--restart unless-stopped -e POSTGRES_USER={db_user} '\n f'-e POSTGRES_PASSWORD={db_pass} '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the given text, and nothing else, into the connection. This keyword does not append a newline nor consume the written text. Use `Write` if these features are needed.
def write_bare(self, text):
    self._verify_connection()
    telnetlib.Telnet.write(self, text)
[ "def write(self, text):\n self.stream.write(text)\n self.stream.flush()\n return self", "def write(self, text, *args):\n if not text:\n return self\n\n if self._is_new_line:\n self._stream.write(self._indentation * self._indentation_level)\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the prompt used by `Read Until Prompt` and `Login` in the current connection. If `prompt_is_regexp` is given any true value, including any nonempty string, the given `prompt` is considered to be a regular expression. The old prompt is returned and can be used to restore the prompt later.
def set_prompt(self, prompt, prompt_is_regexp=False):
    self._verify_connection()
    old = self.prompt
    self._set_prompt(prompt, prompt_is_regexp)
    if old[1]:
        return old[0].pattern, True
    self.prompt = prompt
    self.prompt_is_regexp = prompt_is_regexp ...
[ "def set_prompt(self, prompt, prompt_is_regexp=False):\n old = hasattr(self, '_prompt') and self._prompt or (None, False)\n if prompt_is_regexp:\n self._prompt = (re.compile(prompt), True)\n else:\n self._prompt = (prompt, False)\n if old[1]:\n return old...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each Executor updates the row with index == executor_id. Moreover, when the token arrives at a server for the first time, the server writes its 'host'+'port' into the corresponding 'host' cell in df.
def __init__(self):
    super().__init__(message_type='token')
    self.df = pd.DataFrame(columns=['host', 'n_jobs', 'inc'])
[ "def update_proxy_pool(self):\n proxy_list = []\n try:\n resp = requests.get(self.url)\n except ConnectionError as ce:\n print(ce)\n return(1)\n soup = bs(resp.text, \"html.parser\")\n proxy_table = soup.find_all(id='proxylisttable')\n for t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes gradient of each input tensor with respect to loss tensor.
def _compute_gradients(loss_tensor, list_of_input_tensors):
    list_of_gradient_tensors = tensorflow.gradients(
        loss_tensor, list_of_input_tensors)
    for i in range(len(list_of_gradient_tensors)):
        if list_of_gradient_tensors[i] is not None:
            continue
        list_of_gradient_tensors[i] ...
[ "def _gradient(self, inputs, labels):\n sens = Tensor(np.array([1.0], inputs.dtype))\n # get grad of loss over x\n out_grad = self._loss_grad(Tensor(inputs), Tensor(labels), sens)\n if isinstance(out_grad, tuple):\n out_grad = out_grad[0]\n gradient = out_grad.asnumpy()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upsamples a class-activation matrix (CAM). The CAM may be 1D, 2D, or 3D.
def _upsample_cam(class_activation_matrix, new_dimensions):
    num_rows_new = new_dimensions[0]
    row_indices_new = numpy.linspace(
        1, num_rows_new, num=num_rows_new, dtype=float
    )
    row_indices_orig = numpy.linspace(
        1, num_rows_new, num=class_activation_matrix.shape[0], dtype=float
    ) ...
[ "def generate_CAM(self, input_image, target_class=None):\n self.gradients = []\n self.feature_maps = []\n\n # Do the forward pass through the model.\n model_prediction = self.model(input_image)\n\n if target_class is None:\n # Set the target class to the predicted class...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers the guided-backprop method with the TensorFlow backend.
def _register_guided_backprop():
    if (BACKPROP_FUNCTION_NAME not in
            tensorflow_ops._gradient_registry._registry):
        @tensorflow_ops.RegisterGradient(BACKPROP_FUNCTION_NAME)
        def _GuidedBackProp(operation, gradient_tensor):
            input_type = operation.inputs[0].dtype
            ret...
[ "def get_apply_gradients_op(self):\n raise NotImplementedError()", "def _add_methods_to_eager_tensor(self):\n\n dummy = tf.constant(0)\n eager_type = type(dummy)\n\n eager_type.native___str__ = eager_type.__str__\n eager_type.native___repr__ = eager_type.__repr__\n\n eager_type.__repr_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates saliency function. This function computes the gradient of activations in the target layer with respect to each input value in the specified layers.
def _make_saliency_function(model_object, target_layer_name, input_layer_indices):
    output_tensor = model_object.get_layer(name=target_layer_name).output
    filter_maxxed_output_tensor = K.max(output_tensor, axis=-1)
    if isinstance(model_object.input, list):
        list_of_input_te...
[ "def saliency_map(self, input, class_idx=None, retain_graph=False):\n b, c, h, w = input.size()\n\n logit = self.model(input)\n if class_idx is None:\n score = logit[:, logit.max(1)[-1]].squeeze()\n else:\n score = logit[:, class_idx].squeeze() \n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the input layers connected to the target layer.
def _get_connected_input_layers(model_object, list_of_input_matrices, target_layer_name):
    connected_layer_objects = cnn.get_connected_input_layers(
        model_object=model_object, target_layer_name=target_layer_name)
    num_input_matrices = len(list_of_input_matrices)
    num_c...
[ "def _get_layers_with_node_idx_as_input(node_idx, nodes):\n layer_names = []\n for idx, node in enumerate(nodes):\n if node[consts.OPERATION] == consts.NO_OP:\n continue\n for input_list in node[consts.INPUTS]:\n if input_list[0] == node_idx:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the description, truncated to 300 characters
def getFriendlyDescription(self):
    if not self.description:
        return ''
    if len(self.title) > 65:
        return self.description[:120] + '...'
    return self.description[:200] + '...' if len(self.description) > 200 else self.description
[ "def desc_short(self):\n if len(self.description) >= 250:\n desc = self.description[:250]\n desc = desc.rsplit(\" \", 1)[0] + \"...\"\n return desc\n return self.description", "def _description_string(self) -> str:", "def Description(self):\n portal_transfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add source names to the Names table in the database. Provide either two lists of sources and other_names or a 2D names_table.
def add_names(db, sources=None, other_names=None, names_table=None):
    if names_table is not None and sources is not None:
        msg = "Both names table and sources list provided. Provide one or the other"
        logger.error(msg)
        raise RuntimeError(msg)
    names_data = []
    if sources is not None or...
[ "def add_source_names(activities):\n for key in activities.keys():\n activities[key]['source_name'] = key\n\n return activities", "def addsourcefield(dataframe, fieldName, source):\n\tbase = os.path.basename(source)\n\tdataframe[fieldName] = base\n\treturn", "def add_source(self):\n\t\tsource_name=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear English text (ignore words with a wrong tag, ignore stop words, and do lemmatization).
def clear_english_text(self, text):
    clean_text = []
    tagged_text = pos_tag(word_tokenize(text))
    for word, tag in tagged_text:
        wn_tag = converter.penn_to_wn(tag)
        # ignore words with wrong tag
        if wn_tag not in (wn.NOUN, wn.ADJ, wn.ADV):
            continu...
[ "def clear_keyword_terms(self):\n pass", "def clear_text(sourse_text, exclude_symbol):\n for i in exclude_symbol:\n sourse_text = sourse_text.replace(i, '')\n\n return sourse_text", "def filter_sentence(citing_sentence):\r\n\r\n if citing_sentence == None:\r\n return \" \" #filtere...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }