query: string (9 to 9.05k characters)
document: string (10 to 222k characters)
negatives: list (19 to 20 items)
metadata: dict
Return breadcrumbs for this content.
def get_breadcrumbs(self): breadcrumbs_view = getMultiAdapter((self.context, self.request), name='breadcrumbs_view') result = [] for crumb in breadcrumbs_view.breadcrumbs(): result.append({ 'title': crumb['Title'], 'url': crumb['absolute_url'] ...
[ "def get_breadcrumbs(self, item):\n yield Link(_(\"Homepage\"), self.homepage_url)\n\n if item:\n for ancestor in item.ancestors:\n yield Link(ancestor.title, self.request.link(ancestor))\n\n yield Link(item.title, self.request.link(item))", "def retrieve_breadcr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each line of the plain-text string, replace the SPECIAL FIELDS with the JSON code described in utils/questionnaire_tag_description.docx
def replace_with_json(line, id_attr=''): embedded_json_letter = 'a' match = re.search(PLAIN_TEXT_TAGS_REGEX, line) #A match object def json_validation(): validation = {} #a dictionary if(match.group(5)): validation_pairs = match.group(5).split(";;") #delinieated using ; for validation_pair in...
[ "def replace_with_json(line, id_attr=''):\r\n\r\n embedded_json_letter = 'a'\r\n match = re.search(PLAIN_TEXT_TAGS_REGEX, line)\r\n def json_validation():\r\n #TODO: We need to extract 'default' from here and up to cloze\r\n validation = {}\r\n if(match.group(5)):\r\n validation_pairs = match.group...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a list of attendees based on existing data
def __init_updated_attendees(auth_user_id: int, meeting) -> list: attendees = [] if isinstance(meeting, Meeting): for associate_user in meeting.associate_users: if associate_user.user.id != auth_user_id: attendee = dict() attendee['emai...
[ "def generate_list_of_attendees():\n n = generate_attendee_number()\n temp_list = []\n for i in range(n):\n temp_list.append(Attendee())\n return temp_list", "def get_attendees(self, ncr_calendar_id):\n\n _ATTENDEE_LIST = []\n\n calendar_item = self.calendar_item(ncr_calendar_id)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans input text by lowercasing, removing punctuation, and tokenizing.
def clean_text(text): # Lowercase text = text.lower() # Remove punctuation translator = str.maketrans('', '', string.punctuation) text = text.translate(translator) # Tokenize text = word_tokenize(text) return text
[ "def remove_punctuations(self, text: List[str]):\n tokenized_string = ' '.join([str(text[i]) for i in range(len(text))])\n #\n x = re.sub(r\"['-?-!]\", '', tokenized_string)\n x = re.sub(r\"[^a-zA-Z]\", ' ', x)\n x = re.sub(r'\\s', ' ', x)\n x = re.sub(r' +', ' ', x)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses PCA to visualize the Word2Vec embeddings that were created.
def visualize_embeddings(model): X = model[model.wv.vocab] pca = PCA(n_components=2) result = pca.fit_transform(X) plt.scatter(result[:, 0], result[:, 1]) words = list(model.wv.vocab) for i, word in enumerate(words): plt.annotate(word, xy=(result[i, 0], result[i, 1])) plt.show()
[ "def plot_pca(w2v_model):\n X = w2v_model[w2v_model.wv.vocab]\n pca = PCA(n_components=2)\n w2v_result = pca.fit_transform(X)\n w2v_words = list(w2v_model.wv.vocab)\n plt.figure()\n plt.scatter(w2v_result[:, 0], w2v_result[:, 1], marker='.')\n wordlist = [\"mies\", \"kuningas\", \"nainen\", \"k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the path where Delphi is installed
def _getDelphiPath(env, version = None): if not version: version = r'DELPHI7' if not '\\' in version: version = VERSIONS.__dict__.get(version, VERSIONS.DELPHI7) KEYPATH = r'SOFTWARE\%s\RootDir' % version return env.RegGetValue(KEYPATH) or ''
[ "def get_install_path():\n\n return os.path.dirname(__file__)", "def get_path() -> Optional[Path]:\n return _WIN_SDK_PATH", "def get_nuke_path():\n\n return nuke.EXE_PATH", "def get_pth_dir(executable):\n output = runner.run([\n executable,\n '-c',\n 'import json, sys; print(j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Action to build a Delphi program. It changes to the directory where the source file is before running the compiler; prior to this, we remember the path to the target directory.
def DelphiCommandLineGenerator(source, target, env, for_program, for_signature = 0, for_locale = 0): use_packages = 0 packages = [] source_packages = [] if for_program: use_packages = env.get('DCC32_USEPACKAGES') packages = env.get('DCC32_PACKAGES', []) if is_Sequence(so...
[ "def build(ctx):\n cmd = \"pyinstaller -n dploy --onefile \" + os.path.join(\"dploy\", \"__main__.py\")\n ctx.run(cmd, **RUN_ARGS)", "def generate(env):\r\n\r\n version = env.get('DELPHI_VERSION', None) \r\n delphi = env.Dir(_getDelphiPath(env, version))\r\n env['DELPHI'] = delphi\r\n env['DE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator that constructs the command line to build a Delphi package
def DelphiPackageCompileGenerator(source, target, env, for_signature = 0): return DelphiCommandLineGenerator(source, target, env, for_program=0, for_signature=for_signature)
[ "def DelphiCommandLineGenerator(source, target, env, for_program, for_signature = 0, for_locale = 0):\r\n\r\n use_packages = 0\r\n packages = []\r\n source_packages = []\r\n\r\n if for_program:\r\n use_packages = env.get('DCC32_USEPACKAGES')\r\n packages = env.get('DCC32_PACKAGES', [])\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator that constructs the command line to build a Delphi program
def DelphiProgramCompileGenerator(source, target, env, for_signature = 0): return DelphiCommandLineGenerator(source, target, env, for_program=1, for_signature=for_signature)
[ "def DelphiCommandLineGenerator(source, target, env, for_program, for_signature = 0, for_locale = 0):\r\n\r\n use_packages = 0\r\n packages = []\r\n source_packages = []\r\n\r\n if for_program:\r\n use_packages = env.get('DCC32_USEPACKAGES')\r\n packages = env.get('DCC32_PACKAGES', [])\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Emitter for tlibimp, so that it removes both the .pas it generates and the .dcr
def DelphiTypelibEmitter(target, source, env): node = target[0] outdir = env.Dir(node.dir) base, ext = SCons.Util.splitext(node.name) targets = [ outdir.File(base + '.pas') , outdir.File(base + '.dcr') ] return (targets, source)
[ "def emit(self, ctx, modules, fd):\n return", "def DelphiTypelibAction(target, source, env):\r\n\r\n for src in source:\r\n\r\n TLIB_COM = '$TLIB_BIN $TLIB_FLAGS -D' + str(target[0].dir) + ' ' + os.path.abspath(str(src))\r\n TLIB_MSG = '**** [TLIBIMP] Generando units ' + \\\r\n '\\n\\tSOURC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Action to generate _TLB.pas
def DelphiTypelibAction(target, source, env): for src in source: TLIB_COM = '$TLIB_BIN $TLIB_FLAGS -D' + str(target[0].dir) + ' ' + os.path.abspath(str(src)) TLIB_MSG = '**** [TLIBIMP] Generando units ' + \ '\n\tSOURCE: ' + str(src) + \ '\n\tTARGET: ' + str(target[0]) + \ '...
[ "def Generate(self, types, filename_h, filename_c):\n # Declarations (.h file)\n h = open(filename_h, \"w\")\n h.write(_COPYRIGHT_HEADER)\n guard_name = \"TRUNKS_%s_\" % filename_h.upper().replace(\".\", \"_\")\n h.write(_HEADER_FILE_GUARD_HEADER % {\"name\": guard_name})\n h.write(\"\"\"\n#includ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add Builders and construction variables for delphi to an Environment.
def generate(env): version = env.get('DELPHI_VERSION', None) delphi = env.Dir(_getDelphiPath(env, version)) env['DELPHI'] = delphi env['DELPHI_IDE'] = delphi.File(IDES.__dict__.get(version, IDES.DELPHI)) env['DELPHI_BPGSUFFIX'] = BPGSUFFIX.__dict__.get(version, BPGSUFFIX.DELPHI) ...
[ "def _build_environment(func, bound_args):\n spec = [(\"arg\" + str(i), t) for i, t in enumerate(bound_args)]\n\n exec_glbls = dict(spec=spec)\n exec_glbls[\"jitclass\"] = jitclass\n assign_env = \"; \".join(f\"self.arg{i} = arg{i}\" for i, t in enumerate(bound_args))\n env_args =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test_view_access: Test accessing the data via the view
def test_view_access(self): db = mock.Mock() db._database_name = 'unittest' ddoc = DesignDocument(db, "_design/tests") ddoc._database_host = "https://bob.cloudant.com" view1 = View(ddoc, "view1", map_func=self.map_func) self.assertEqual( view1.url, ...
[ "def testViewViewAuthenticated(self):\n self.client.login(username='samuel', password='testing')\n response = self.client.get(reverse('pub_view', args=[1]))\n self.assertEquals(response.status_code, 200)\n self.assertEquals(type(response.context[-1]['reading']),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the id of the node, falling through to phy if not set on this overlay
def id(self): #TODO: make generic function for fall-through properties stored on the anm key = "id" if key in self._interface: return self._interface.get(key) else: if self.overlay_id == "input": return # Don't fall upwards from input -> phy a...
[ "def NodeId(self) -> int:", "def _get_node_id(self) -> Optional[str]:\n return self._uid[-1] if len(self._uid) == 3 else None", "def get_node_id(node):\n return str(node.id)", "def node_id(self):\n ret = self._get_attr(\"nodeId\")\n return ret", "def node_id(self) -> str:\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether this interface is bound to an edge on this layer
def is_bound(self): # TODO: make this a function return len(self.edges()) > 0
[ "def is_edge(self):\n return self.num_tops()>1", "def isEdgeReaching(self):\n return transform.pos2_edge_isOpposing(self[0], self[-1])", "def has_edge(self, v, w):\n return (v,w) in self.edges()", "def is_edge(self) -> \"bool\":\n return self._value.getType() == Value.EVAL", "def is_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns corresponding interface in specified overlay
def __getitem__(self, overlay_id): if not self.anm.has_overlay(overlay_id): log.warning('Trying to access interface %s for non-existent overlay %s' , self, overlay_id) return None if not self.node_id in self.anm.overlay_nx_graphs[overlay_id]: ...
[ "def get_interface_for_name(protocols: Iterable[Protocol],\n target_interface_name: str) -> Optional[Interface]:\n for protocol in protocols:\n for interface in protocol.interfaces:\n if interface.name == target_interface_name:\n return interface\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all edges from the node that have this interface ID. This is the convention for binding an edge to an interface
def edges(self): # edges have _interfaces stored as a dict of {node_id: interface_id, } valid_edges = [e for e in self.node.edges() if self.node_id in e.raw_interfaces and e.raw_interfaces[self.node_id] == self.interface_id] return list(valid_edges...
[ "def get_edges():\r\n\r\n edges = traci.edge.getIDList()\r\n return list(filter(lambda x: x[0] != \":\", edges))", "def get_edges(network_id):\n edges = get_generic_element(\n network_id, 'edge', ignore='function', child_key='lane')\n return edges", "def edges(self):\n return self.gene...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the binary vector sigma_0 that corresponds to the index m, where m is an int between 0 and 2^N. typ determines whether the neuron activation state is defined in {-1,1} or {0,1}: typ=1 -> {-1,1}, typ=0 -> {0,1}
def stateIndex2stateVec(m,N,typ = 1): sigma_0 = [ (1+typ)* (int(float(m)/2**i) % 2) - typ for i in range(0,N)] # typ=1 --> [-1,1] typ=0 --> [0,1] sigma_0.reverse() sigma_0 = np.array(sigma_0,dtype=np.uint8) return sigma_0
[ "def initial_sigma(map_shape):\n return math.ceil(functools.reduce(lambda x, y: x + y ** 2, map_shape) ** 0.5)", "def createSmn(img_size, m, n, alphaBTV):\n N1, N2 = img_size\n alpha = alphaBTV**(np.abs(m)+np.abs(n))\n S = createSv((N1,N2), m).dot(createSh((N1,N2), n))\n return alpha*(ident...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the index m that corresponds to the binary vector sigma_0, where m is an int between 0 and 2^N. typ determines whether the neuron activation state is defined in {-1,1} or {0,1}: typ=1 -> {-1,1}, typ=0 -> {0,1}
def stateVec2stateIndex(sigma,N,typ = 1): k=int(0) for i in range(0,N): k=k+(sigma[i]+typ)/(1+typ)*2**(N-i-1) # typ=1 --> [-1,1] typ=0 --> [0,1] return int(k)
[ "def stateIndex2stateVec(m,N,typ = 1):\n sigma_0 = [ (1+typ)* (int(float(m)/2**i) % 2) - typ for i in range(0,N)] # typ=1 --> [-1,1] typ=0 --> [0,1]\n sigma_0.reverse()\n sigma_0 = np.array(sigma_0,dtype=np.uint8)\n return sigma_0", "def index(self, state):\n try:\n idx = self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of binary vectors sigmas that correspond to the list of indexes ms, where each m in ms is an int between 0 and 2^N. typ determines whether the neuron activation state is defined in {-1,1} or {0,1}: typ=1 -> {-1,1}, typ=0 -> {0,1}
def stateIndex2stateVecSeq(ms,N,typ = 1): # type: (state index sequence, number of neurons, typ) -> state vector sequence sigmas = [ stateIndex2stateVec(m,N,typ) for m in ms] sigmas = np.array(sigmas) return sigmas
[ "def stateIndex2stateVec(m,N,typ = 1):\n sigma_0 = [ (1+typ)* (int(float(m)/2**i) % 2) - typ for i in range(0,N)] # typ=1 --> [-1,1] typ=0 --> [0,1]\n sigma_0.reverse()\n sigma_0 = np.array(sigma_0,dtype=np.uint8)\n return sigma_0", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of indexes ms that correspond to the list of binary vectors sigmas, where each m in ms is an int between 0 and 2^N. typ determines whether the neuron activation state is defined in {-1,1} or {0,1}: typ=1 -> {-1,1}, typ=0 -> {0,1}
def stateVec2stateIndexSeq(sigmas,N,typ = 1): ms = [ stateVec2stateIndex(s,N,typ) for s in sigmas] ms = np.array(ms) return ms
[ "def stateIndex2stateVec(m,N,typ = 1):\n sigma_0 = [ (1+typ)* (int(float(m)/2**i) % 2) - typ for i in range(0,N)] # typ=1 --> [-1,1] typ=0 --> [0,1]\n sigma_0.reverse()\n sigma_0 = np.array(sigma_0,dtype=np.uint8)\n return sigma_0", "def getAllWSTIndices(m):\r\n assert type(m) is int and m>=1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transition function. net1 is the network that generates the transitions. If sigma_path0 is a binary vector, it generates the corresponding transitions. If sigma_path0 is a list of binary vectors, it generates a list with the corresponding transitions. typ determines whether the neuron activation state is defined in {-1,1} or {0,1}...
def transPy(sigma_path0,net1,N,typ = 1, thr = 0,signFuncInZero = 1): if not net1 == np.float32: net1 = np.float32(net1) if not sigma_path0 == np.float32: sigma_path0 = np.float32(sigma_path0) sigma_path1 = np.array(net1.dot(sigma_path0.T)) #print('sigma_path1 tupe',type(sigma_path1)) ...
[ "def transActiv(sigma_path0, net1, N, typ=1, thr=0, signFuncInZero=1):\n if not net1 == np.float32:\n net1 = np.float32(net1)\n if not sigma_path0 == np.float32:\n sigma_path0 = np.float32(sigma_path0)\n sumx = net1.dot(sigma_path0.T)\n # print sigma_path1\n # if signFuncInZero == 1:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transition function. net1 is the network that generates the transitions. If sigma_path0 is a binary vector, it generates the corresponding transitions. If sigma_path0 is a list of binary vectors, it generates a list with the corresponding transitions. typ determines whether the neuron activation state is defined in {-1,1} or {0,1}...
def transActiv(sigma_path0, net1, N, typ=1, thr=0, signFuncInZero=1): if not net1 == np.float32: net1 = np.float32(net1) if not sigma_path0 == np.float32: sigma_path0 = np.float32(sigma_path0) sumx = net1.dot(sigma_path0.T) # print sigma_path1 # if signFuncInZero == 1: # sigma...
[ "def transPy(sigma_path0,net1,N,typ = 1, thr = 0,signFuncInZero = 1):\n if not net1 == np.float32:\n net1 = np.float32(net1)\n if not sigma_path0 == np.float32:\n sigma_path0 = np.float32(sigma_path0)\n sigma_path1 = np.array(net1.dot(sigma_path0.T))\n #print('sigma_path1 tupe',type(sigma_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get outflow for a given timestep
def get_outflow(self, timestep): return self._outflow[timestep]
[ "def get_inflows_outflows(self, nodeid:int, timewindow:str): \n in_channels, out_channels = get_in_out_channel_numbers(self.gc, nodeid)\n in_flows = [self.hydro.get_channel_flow(cid,'upstream',timewindow=timewindow) for cid in in_channels]\n out_flows = [self.hydro.get_channel_flow(cid,'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure epochs in phase units.
def configure_epochs(self, epochs, time_of_periastron=None): if time_of_periastron is not None: self.time_of_periastron = time_of_periastron self.epochs = ((epochs - self.time_of_periastron) % self.period) / self.period else: self.epochs = e...
[ "def pre_epoch(self):\n pass", "def updateEpochs(self):\n self.epochs={}\n nEpochs=len(self.epochPerDac)\n for epochIndex,epoch in enumerate(self.epochPerDac):\n for key in sorted(epoch.keys()):\n if not key in self.epochs.keys():\n self.epo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate centroid velocity of a binary Keplerian orbit.
def keplerian_model_centroid_velocity(self): # Assert correct params have been set. try: assert self.period is not None assert self.eccentricity is not None assert self.rv_semi_amplitude is not None assert self.argument_of_periastron is not None ...
[ "def convolutional_model_centroid_velocity(self):\n # Assert correct params have been set.\n try:\n assert self.period is not None\n assert self.eccentricity is not None\n assert self.rv_semi_amplitude is not None\n assert self.argument_of_periastron is not ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate centroid velocity of convolved/time averaged Keplerian orbit.
def convolutional_model_centroid_velocity(self): # Assert correct params have been set. try: assert self.period is not None assert self.eccentricity is not None assert self.rv_semi_amplitude is not None assert self.argument_of_periastron is not None ...
[ "def keplerian_model_centroid_velocity(self):\n # Assert correct params have been set.\n try:\n assert self.period is not None\n assert self.eccentricity is not None\n assert self.rv_semi_amplitude is not None\n assert self.argument_of_periastron is not None...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the lines.txt file and returns a dictionary that translates the line into code. All returned dictionaries contain... a 'type' (whatever type of action it is) a 'condition' (if a certain condition is required before the action can be taken) and possibly more, depending on the 'type' of action
def line_to_code(line): global choices_to_read line = line.strip() # first test if the action has a condition, indicated by brackets if len(line) > 0 and line[0] == '[': # if so, store the condition in a variable, then trash everything that was in brackets and continue as usual lin...
[ "def parseFile(lines: List[str]) -> Dict[str, List[Tuple[int, int]]]:\n points: Dict[str, List[Tuple[int, int]]] = dict()\n newChar = False\n cont = False\n point = []\n sequenceClass = None\n\n for line in lines[1:]:\n if '.COMMENT' in line and 'Class' in line and '[' in line and '#' not i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the current dictionary, assuming it's dialogue, and call the "say" function on the given character. Note that characters are referred to by their nicknames, which are part of the dictionary and indicated before the colon.
def draw_speech(): global someone_speaking current_line = lines[scene] current_nick = current_line['nick'] current_text = current_line['line'] current_character = None for character in characters: if character.nick == current_nick: current_character = character ...
[ "async def character_info(self, ctx, *, character: str):\n\t\tscopes = [\"characters\"]\n\t\tuser = ctx.message.author\n\t\tcharacter = character.title()\n\t\tcharacter.replace(\" \", \"%20\")\n\t\tendpoint = \"characters/{0}\".format(character)\n\t\tkeydoc = await self.fetch_key(user)\n\t\ttry:\n\t\t\tawait self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate through the Background instances until one of the names matches the background_name parameter. Then, set the current_background to that Background object.
def change_background(background_name): global current_background for background in backgrounds: if background.name == background_name: current_background = background
[ "def GetBackground2(self):\n ...", "def GetBackground(self):\n ...", "def set_background(self, new_bg_id, *screens): \n new_animated_bg = AnimationGenerator.factory(new_bg_id, self.resolution, PARAMS.ANIMATION_TIME,\\\n INIT_PARAMS.ALL_FPS,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send the specified text to the specified textbox. If the name should be displayed, tell the textbox who's speaking.
def say(self, text, textbox=textbox1, with_name=True): textbox.text = text if with_name: textbox.name = self.name
[ "def add_text(self, text):\n self.textbox['text'] = text", "def set_Text(self, value):\n super(SendMessageInputSet, self)._set_input('Text', value)", "def write_to_chat(self,text):\r\n textbox = self.browser.find_element_by_class_name(\"slateTextArea-1Mkdgw\")\r\n sleep(0.5)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the button is onscreen and the mouse clicks on it, return the 'output' message; otherwise, return None
def is_pressed(self): mouse = pygame.mouse.get_pos() if self.textbox.rect != None and self.textbox.rect.collidepoint(mouse) and mouse_pressed == True: return self.output else: return None
[ "def draw(self, screen: pygame.Surface) -> bool:\n action = False # button is not clicked by default\n\n # get mouse position\n pos = pygame.mouse.get_pos()\n\n # check mouseover and clicked conditions\n if self.rect.collidepoint(pos): # if image collides with mouse\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a new button to the current choice's list to represent the new option.
def add_option(self, text, name): button_textbox = Textbox([5 / 6 * display_w, 1 / 6 * display_h], text) output = name button = Button(button_textbox, output) self.buttons.append(button)
[ "def add_choice(self, choice):\n value, desc = choice\n if len(self._pool):\n # pool is not empty so get buttons from there\n button = self._pool.pop(0)\n else:\n # create a new radio button since pool is empty\n button = RadioButton(self)\n bu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show a new selection.
def _show_selection(self): i,j = self._find_label_coordinates(self._selected_date) label = self._day_labels[i,j] label.configure(background=self._sel_bg, foreground=self._sel_fg) label.unbind("<Enter>") label.unbind("<Leave>") self._selection_is_vi...
[ "def _show_selection(self):\n\n i, j = self._find_label_coordinates(self._selected_date)\n\n label = self._day_labels[i, j]\n\n label.configure(background=self._sel_bg, foreground=self._sel_fg)\n\n label.unbind(\"<Enter>\")\n label.unbind(\"<Leave>\")\n\n self._selection_is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update calendar to show the previous day.
def select_prev_day(self): if self._selected_date is None: self._selected_date = datetime.datetime(self._year, self._month, 1) else: self._clear_selection() self._selected_date = self._selected_date - self.timedelta(days=1) self._build_calendar(self._s...
[ "def select_prev_day(self):\n if self._selected_date is None:\n self._selected_date = datetime.datetime(self._year, self._month, 1)\n else:\n self._clear_selection()\n self._selected_date = self._selected_date - self.timedelta(days=1)\n\n self._build_calendar(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update calendar to show the next day.
def select_next_day(self): if self._selected_date is None: self._selected_date = datetime.datetime(self._year, self._month, 1) else: self._clear_selection() self._selected_date = self._selected_date + self.timedelta(days=1) self._build_calendar(self....
[ "def select_next_day(self):\n\n if self._selected_date is None:\n self._selected_date = datetime.datetime(self._year, self._month, 1)\n else:\n self._clear_selection()\n self._selected_date = self._selected_date + self.timedelta(days=1)\n\n self._build_calendar(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update calendar to show the previous week.
def select_prev_week_day(self): if self._selected_date is None: self._selected_date = datetime.datetime(self._year, self._month, 1) else: self._clear_selection() self._selected_date = self._selected_date - self.timedelta(days=7) self._build_calendar(se...
[ "def select_prev_week_day(self):\n if self._selected_date is None:\n self._selected_date = datetime.datetime(self._year, self._month, 1)\n else:\n self._clear_selection()\n self._selected_date = self._selected_date - self.timedelta(days=7)\n\n self._build_calend...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update calendar to show the next week.
def select_next_week_day(self): if self._selected_date is None: self._selected_date = datetime.datetime(self._year, self._month, 1) else: self._clear_selection() self._selected_date = self._selected_date + self.timedelta(days=7) self._build_calendar(se...
[ "async def gcalendar_eventsnextweek(self):\n\n\t\tawait self.events_next_week()", "def next(self):\n return Week.for_date(self.day(7))", "async def gcalendar_eventsthisweek(self):\n\n\t\tawait self.events_this_week()", "def update_calendar(self, calendar_form):\n pass", "def select_next_day(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update calendar to show the previous year.
def prev_year(self): if self._selection_is_visible: self._clear_selection() self._build_calendar(self._year-1, self._month) # reconstruct calendar
[ "def prev_year(self):\n\n if self._selection_is_visible: self._clear_selection()\n\n self._build_calendar(self._year - 1,\n self._month) # reconstruct calendar", "def prevyear(self, *event):\n self.index = self.listyear.index(self.currfolder.year)\n self.ol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update calendar to show the next year.
def next_year(self): if self._selection_is_visible: self._clear_selection() self._build_calendar(self._year+1, self._month) # reconstruct calendar
[ "def next_year(self):\n\n if self._selection_is_visible: self._clear_selection()\n\n self._build_calendar(self._year + 1,\n self._month) # reconstruct calendar", "def nextyear(self, *event):\n self.index = self.listyear.index(self.currfolder.year)\n self.ol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the selected date.
def set_selection(self, date): if self._selected_date is not None and self._selected_date != date: self._clear_selection() self._selected_date = date self._build_calendar(date.year, date.month) # reconstruct calendar
[ "def set_date (self, month=None, year=None):\n if month:\n self.year_month_choice.set_month (month)\n if year:\n self.year_month_choice.set_year (year)", "def edit_date(self, new_date):\n self.date = new_date", "def SetSelDay(self, sel):\n self.sel_lst = sel", "def set_start_date...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get current internet speed and update down and up speeds
def get_internet_speed(self): self.driver.get('https://fast.com/') time.sleep(40) # sleeps for 40 sec to let the test complete show_more_info_btn = self.driver.find_element_by_id('show-more-details-link') show_more_info_btn.click() self.down = int(round(float(self.driver.find_el...
[ "def get_speed(self):\n return float(self.send('speed?'))", "def get_speed(self):\n std_out, _, _ = self.run_command(\"speedtest-cli --simple\", default_asserts=True)\n print(std_out)\n\n current_ping = float(std_out[0].replace('Ping: ', '').replace(' ms', ''))\n current_downloa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a 'Highlight' annotation that covers the area given by quadpoints.
def highlight_annotation(quadpoints, contents=None, author=None, subject=None, color=YELLOW, alpha=1, flag=4): qpl = [] print quadpoints for x0,y0,x1,y1 in quadpoints: qpl.extend([x0, y1, x1, y1, x0, y0, x1, y0]) # The rectangle needs to contain the highlighted region fo...
[ "def geojson_highlight(bounds, world_bounds):\n return { 'type' : 'FeatureCollection',\n 'features' : [\n {\n 'type': 'Feature',\n 'geometry': world_poly_with_hole(bounds)\n }\n ]\n }", "def make_polygon(\n class_name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes keyword, layout, x and y and returns the bbox location.
def get_location(keyword,layout,x,y): keyword_length = len(keyword) locations = [] print layout.bbox for obj in layout._objs: if isinstance(obj,LTTextBoxHorizontal) : for o in obj._objs : arr = o._objs index = 0 line_lengt...
[ "def process_mtcnn_bbox(bbox, im_shape):\n y0, x0, y1, x1 = bbox[0:4]\n w, h = int(y1 - y0), int(x1 - x0)\n length = (w + h)/2\n center = (int((x1+x0)/2),int((y1+y0)/2))\n new_x0 = np.max([0, (center[0]-length//2)])#.astype(np.int32)\n new_x1 = np.min([im_shape[0], (center[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse file to data list
def parse_data_file(self, file_name: str) -> List[Tuple[str, int]]: with open(file_name, "r") as f: data_list = [] for line in f.readlines(): path, target = line.split() target = int(target) data_list.append((path, target)) return d...
[ "def parse_file(self, data_file_path) -> list:\n rows = []\n logger.info('opening file `%s`', self.data_file)\n\n with open(data_file_path) as data:\n for line in data:\n values = self.parser.parse(line)\n # each row must have NAMED values so a dict is r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Standardise the API output so the consumer doesn't have to worry about whether a link is internal or external
def get_api_representation(self, value, context=None): original_retval_dict = super().get_api_representation(value, context=context) retval = None # If there's both an internal link and an exernal link configured, the internal # one takes priority. Also, we only want to return flat URL,...
[ "def standardize_wiki_url(processed_link):\n\tif processed_link == \"wiki\" or processed_link == \"w\":\n\t\tstandardized_link = \"www.wikipedia.org\"\n\telse:\n\t\tstandardized_link = processed_link\n\n\treturn standardized_link", "def test_reformat_weburl_2(self):\n url = ''\n self.assertEqual(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if Volume file from given path has supported extension.
def has_valid_ext(path: str) -> bool: return is_valid_ext(get_extension(path))
[ "def validate_format(path: str):\n\n if not has_valid_ext(path):\n raise UnsupportedVolumeFormat(\n f\"File {path} has unsupported volume extension. Supported extensions: {ALLOWED_VOLUME_EXTENSIONS}\"\n )", "def _is_file_ext_supported(file_ext):\n return file_ext.lower() in supporte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise error if Volume file from given path couldn't be read or file extension is not supported.
def validate_format(path: str): if not has_valid_ext(path): raise UnsupportedVolumeFormat( f"File {path} has unsupported volume extension. Supported extensions: {ALLOWED_VOLUME_EXTENSIONS}" )
[ "def validate_format(path: str) -> None:\n try:\n get_image_size_and_frames_count(path)\n except Exception as e:\n raise VideoReadException(\n \"Error has occured trying to read video {!r}. Original exception message: {!r}\".format(\n path, str(e)\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rescale intensity value using the given slope and intercept.
def rescale_slope_intercept(value: float, slope: float, intercept: float) -> float: return value * slope + intercept
[ "def transform_image_slope(img):\n\n # Take the same size as trasform_image_side\n img = img.resize((12,12), Image.ANTIALIAS)\n\n # Apply shear\n transform = numpy.matrix(numpy.identity(3))\n transform *= numpy.matrix(\"[0.75,-0.5,3;0.25,0.5,-3;0,0,1]\")\n transform = numpy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read DICOM tags from a DICOM file.
def read_dicom_tags( path: str, allowed_keys: Union[None, List[str]] = _default_dicom_tags, anonymize: bool = True, ): import SimpleITK as sitk reader = sitk.ImageFileReader() reader.SetFileName(path) reader.LoadPrivateTagsOn() reader.ReadImageInformation() vol_info = {} for k...
[ "def DicomRead(filename):\n return _ecvl.DicomRead(filename)", "def dcmread(dicom_path, read_header=False):\n ds = pydicom.dcmread(dicom_path)\n try:\n img = ds.pixel_array\n except Exception:\n img_itk = itk.ReadImage(dicom_path)\n img = itk.GetArrayFromImage(img_itk)\n im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encodes a volume from NumPy format into NRRD format.
def encode(volume_np: np.ndarray, volume_meta: dict) -> bytes: directions = np.array(volume_meta["directions"]).reshape(3, 3) directions *= volume_meta["spacing"] volume_bytes = nrrd_encoder.encode( volume_np, header={ "encoding": "gzip", # "space": "left-posterior-...
[ "def encode_volume(chunk):\n # Rearrange the image for Neuroglancer\n chunk = np.moveaxis(chunk.reshape(chunk.shape[0],chunk.shape[1],chunk.shape[2],1),\n (0, 1, 2, 3), (3, 2, 1, 0))\n assert chunk.ndim == 4\n buf = chunk.tobytes()\n return buf", "def write_raw(self, raw):\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for DICOM series in the directory and its subdirectories.
def inspect_dicom_series(root_dir: str): import SimpleITK as sitk found_series = {} for d in os.walk(root_dir): dir = d[0] reader = sitk.ImageSeriesReader() sitk.ProcessObject_SetGlobalWarningDisplay(False) series_found = reader.GetGDCMSeriesIDs(dir) sitk.ProcessObje...
[ "def get_dicoms(series_path: str) -> List[Types.SeriesObj]:\n try:\n dicoms = []\n for dicom in list(filter(lambda x: \".dcm\" in x, os.listdir(series_path))):\n d = process_local_DICOM(f\"{series_path}{dicom}\")\n dicoms.append(d)\n\n return dicoms\n except Exceptio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read DICOM series volumes with given paths.
def read_dicom_serie_volume(paths: List[str], anonymize: bool = True) -> Tuple[sitk.Image, dict]: import SimpleITK as sitk reader = sitk.ImageSeriesReader() reader.SetFileNames(paths) sitk_volume = reader.Execute() sitk_volume = _sitk_image_orient_ras(sitk_volume) dicom_tags = read_dicom_tags(...
[ "def get_dicoms(series_path: str) -> List[Types.SeriesObj]:\n try:\n dicoms = []\n for dicom in list(filter(lambda x: \".dcm\" in x, os.listdir(series_path))):\n d = process_local_DICOM(f\"{series_path}{dicom}\")\n dicoms.append(d)\n\n return dicoms\n except Exceptio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inspect a directory for NRRD series by recursively listing files with the ".nrrd" extension; returns a list of NRRD file paths found in the directory.
def inspect_nrrd_series(root_dir: str) -> List[str]: nrrd_paths = list_files_recursively(root_dir, [".nrrd"]) logger.info(f"Total {len(nrrd_paths)} nnrd series in directory {root_dir}") return nrrd_paths
[ "def find_nrrd(directory):\n directory_contents = os.listdir(directory)\n file_with_extension = [file for file in directory_contents if \".nrrd\" in file]\n return \"{}/{}\".format(directory, file_with_extension[0])", "def get_nrrd_files(directory):\n t2, adc, bval = \"\", \"\", \"\"\n directory_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than the default.
def get_auth_http_client(self): return httpclient.AsyncHTTPClient()
[ "def _get_client_adapter(self):\n return httpclient.AsyncHTTPClient(max_clients=self._max_clients,\n force_instance=True)", "def _get_http_client(cls):\n if not cls._http_client:\n cls._http_client = resolve_http_client()\n\n return cls._htt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is called to process the sentiment score of all Tweets in reader_full_text. The calculated sentiment score for each tweet is then written into the file named tweet_score_output_file_name, one line at a time, as the processing takes place. Note that any output file with the same name will be replaced...
def process_and_write_to_file(tweet_score_output_file_name, reader_full_text, lexicon_array, hash_table_lex): tweet_word_array = [] # open file to write sentiment score writing_file = open(tweet_score_output_file_name, "w+", encoding='utf-8') previous_line_data = [] # skip the first line because ...
[ "def main():\n\n # command line parsing\n parser = buildParser()\n args = parser.parse_args()\n\n\n # construct the tweet pro-processing object\n tweetTokenizer = TweetTokenizer()\n lPunct = list(string.punctuation)\n lStopwords = stopwords.words('english') + lPunct + ['rt', 'via', '...', '…', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the sentiment score of the last tweet in writing_file by deleting the last line in writing_file and then adding a new line containing [previous_line_tweet_id, new_updated_score].
def update_previous_line_sentiment_score(new_updated_score, previous_line_tweet_id, last_line_file_position, writing_file): # move pointer to beginning of previous line to delete the line by truncating at the pointer writing_file.seek(last_line_file_position) writing_file.truncate() # write the new up...
[ "def process_and_write_to_file(tweet_score_output_file_name, reader_full_text, lexicon_array, hash_table_lex):\n tweet_word_array = []\n\n # open file to write sentiment score\n writing_file = open(tweet_score_output_file_name, \"w+\", encoding='utf-8')\n\n previous_line_data = []\n\n # skip the firs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process and prepare time data file for graphing tweet sentiment scores against time. The time data file is set to write into a file named tweet_time_output_data, so any file of the same name will be erased before writing.
def calculate_elapsed_time_and_write_to_file(tweet_time_output_data, csv_reader_time_data, first_tweet_time_data): # open file to write sentiment score writing_file = open(tweet_time_output_data, "w+", encoding='utf-8') # empty file before writing writing_file.truncate() for row in csv_reader_time...
[ "def Create_Analysis_File(tweets, output_filename=None, title = \"Twitter Analysis\"):\n #--- Read input file ---\n if type(tweets) == str:\n if title == \"Twitter Analysis\": #default title given, use filename instead\n title = tweets.replace(\".txt\", \"\")\n if output_filename ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot two graphs: the first is a graph of the Tweets' total sentiment scores against time; the second is a graph of the Tweets' average sentiment scores against time
def plot_graph_with_time_and_sentiment_dictionary(time_to_sentiment_dictionary): plot_hours = [] plot_scores = [] list_of_number_of_tweets_for_the_hour = [] # there are 10 type of sentiment sccore for each tweet for k in range(10): plot_scores.append([]) for dkey, dvalue in time_to_sen...
[ "def analyze_and_plot_sentiment_per_week(self, tweets, sentiment_pos_limit, sentiment_neg_limit):\n\t\tanalysation_by_week = self.analyze_sentiment_by_weeks(tweets, sentiment_pos_limit, sentiment_neg_limit)\n\t\tself.plot_sentiment(analysation_by_week)", "def plot_sentiment(sentiment: pd.DataFrame, ticker: str):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Categorize each tweet based on its maximum sentiment score. If the max score is 0, it is categorized as neutral; otherwise, if there are one or more sentiment values that match the maximum score, the tweet is categorized under those one or more categories. This means that a tweet will be categorized as neut...
def categorization_histogram(score_file): tweet_scores = open(score_file, 'r', encoding='utf-8') # sentiment type reference for list_for_categorization: [anger, anticipation, disgust, fear, joy, negative, # positive, sadness, surprise, trust, neutral] list_for_categorization = [0, 0, 0, 0, 0, 0, 0, 0...
[ "def __categorizeSentiment(self):\n score = self.__overallSentiment\n if score < -0.4:\n return \"Worst\"\n elif score < -0.3:\n return \"Very Bad\"\n elif score < -0.1:\n return \"Bad\"\n elif score < 0.1:\n return \"Neutral\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Using subaddresses leads to a few poorly documented exceptions. Normally we set R=rG (tx_pub); however, for subaddresses this is equal to R=rD to achieve the nice blockchain scanning property. Remember that R is per-transaction and not per-input. It's all good if we have a single output or we have a single destination an...
def _check_subaddresses(state: State, outputs: list): from apps.monero.xmr.addresses import classify_subaddresses # let's first figure out what kind of destinations we have num_stdaddresses, num_subaddresses, single_dest_subaddress = classify_subaddresses( outputs, state.change_address() ) ...
[ "def subaddress():\n return SubAddressConfig", "def _precompute_subaddr(state: State, account: int, indices):\n monero.compute_subaddresses(state.creds, account, indices, state.subaddresses)", "def construct_subaddress(self, field):\n return self.subaddresses.construct(self, field)", "def add...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes primary change address for the current account index
def _get_primary_change_address(state: State): from trezor.messages.MoneroAccountPublicAddress import MoneroAccountPublicAddress D, C = monero.generate_sub_address_keys( state.creds.view_key_private, state.creds.spend_key_public, state.account_idx, 0 ) return MoneroAccountPublicAddress( ...
[ "def get_change_address(account=None):\n return wallet['obj'].get_change_address(account)", "def _precompute_subaddr(state: State, account: int, indices):\n monero.compute_subaddresses(state.creds, account, indices, state.subaddresses)", "def current_address():\n return wallet['obj'].current_address", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the change address in state.output_change (from `tsx_data.outputs`) is a) among tx outputs and b) equal to our address. The change output is in `tsx_data.change_dts`, but also has to be in `tsx_data.outputs`. This is what Monero does in its cold wallet signing protocol. In other words, these structures are built...
def _check_change(state: State, outputs: list): from apps.monero.xmr.addresses import addr_eq, get_change_addr_idx change_index = get_change_addr_idx(outputs, state.output_change) change_addr = state.change_address() # if there is no change, there is nothing to check if change_addr is None: ...
[ "def _check_subaddresses(state: State, outputs: list):\n from apps.monero.xmr.addresses import classify_subaddresses\n\n # let's first figure out what kind of destinations we have\n num_stdaddresses, num_subaddresses, single_dest_subaddress = classify_subaddresses(\n outputs, state.change_address()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Precomputes subaddresses for an account (major) and a list of indices (minors). Subaddresses have to be stored in encoded form (a unique representation): a single point can have multiple extended-coordinate representations, which would not match during subaddress search.
def _precompute_subaddr(state: State, account: int, indices): monero.compute_subaddresses(state.creds, account, indices, state.subaddresses)
[ "def _check_subaddresses(state: State, outputs: list):\n from apps.monero.xmr.addresses import classify_subaddresses\n\n # let's first figure out what kind of destinations we have\n num_stdaddresses, num_subaddresses, single_dest_subaddress = classify_subaddresses(\n outputs, state.change_address()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the destination address public view key to be used for payment id encryption. If no encrypted payment ID is chosen, a dummy payment ID is set for better transaction uniformity, if possible.
def _get_key_for_payment_id_encryption( tsx_data: MoneroTransactionData, change_addr=None, add_dummy_payment_id: bool = False, ): from apps.monero.xmr.addresses import addr_eq from trezor.messages.MoneroAccountPublicAddress import MoneroAccountPublicAddress addr = MoneroAccountPublicAddress( ...
[ "def get_destination_view_key_pub(destinations, change_addr=None):\n from monero_glue.xmr.sub.addr import addr_eq\n\n addr = MoneroAccountPublicAddress(\n spend_public_key=crypto.NULL_KEY_ENC, view_public_key=crypto.NULL_KEY_ENC\n )\n count = 0\n for dest in destinations:\n if dest.amou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypts the payment_id hex. Used in the transaction extra. Only the recipient is able to decrypt it.
def _encrypt_payment_id(payment_id, public_key, secret_key): derivation_p = crypto.generate_key_derivation(public_key, secret_key) derivation = bytearray(33) derivation = crypto.encodepoint_into(derivation, derivation_p) derivation[32] = 0x8D # ENCRYPTED_PAYMENT_ID_TAIL hash = crypto.cn_fast_hash(d...
[ "def encrypt_payment_id(payment_id, public_key, secret_key):\n derivation_p = crypto.generate_key_derivation(public_key, secret_key)\n derivation = bytearray(33)\n derivation = crypto.encodepoint_into(derivation, derivation_p)\n derivation[32] = 0x8D # ENCRYPTED_PAYMENT_ID_TAIL\n hash = crypto.cn_fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the best customer relocation move, based on routing costs. Of all such moves, the best is performed and the updated solution is returned. O(n^2), where n is the number of customers. Similar to reinsertion in Hornstra et al. (2020). References Savelsbergh, Martin W. P. 1992. "The Vehicle Routing Problem with Ti...
def relocate_customer(solution: Solution) -> Solution: improvements = Heap() costs = routing_costs(solution) for idx_route, curr_route in enumerate(solution.routes): for customer in curr_route: for route in solution.routes[idx_route:]: for idx in range(len(route) + 1): ...
[ "def exchange_customer(solution: Solution) -> Solution:\n improvements = Heap()\n costs = routing_costs(solution)\n\n for idx1, route1 in enumerate(solution.routes):\n for idx2, route2 in enumerate(solution.routes[idx1 + 1:], idx1 + 1):\n iterable = product(range(len(route1)), range(len(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a footprint object
def create_footprint(band_id, points): return(manifest.Footprint(band_id=band_id, points=points))
[ "def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an affine_transform object. args: A dictionary with affine transformation properties: "scale_x", "shear_x", "translate_x", "shear_y", "scale_y", "translate_y". Returns an affine transform object.
def create_affine_transform(args): return(manifest.AffineTransform(**args))
[ "def affine(self):\n return Affine(*self.transform)", "def affine_transformation(X_unprj, affine_x, affine_y, args, header):\n\tx_pred = np.dot(X_unprj, affine_x)\n\ty_pred = np.dot(X_unprj, affine_y)\n\treturn x_pred, y_pred", "def get_affine_transform(center,\n scale,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a mask_bands object. Can be used for 2 cases: to use a geotiff as a mask for all bands (band_ids_list = None), or to use a geotiff as a mask for specific bands (band_ids_list = list of band id lists).
def create_mask_bands(tileset_id_list, band_ids_list=None): if band_ids_list: out = [{"tileset_id": tmp1, "band_ids": tmp2} for tmp1, tmp2 in zip(tileset_id_list, band_ids_list)] else: out = [{"tileset_id": tmp1} for tmp1 in tileset_id_list] return(manifest.MaskBands(out))
[ "def filter_bands(self, bands: List[Union[int, str]]) -> 'BandDimension':\n return BandDimension(\n name=self.name,\n bands=[self.bands[self.band_index(b)] for b in bands]\n )", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if self._is_array(n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a bands_list object. Can be used for 2 cases: when id_list and the properties lists have equal length, band objects are created using the elements from each list in order; when id_list is longer than the properties lists, the first property is added to each band. id_list: List of band id strings. md_list: List of lists with mis...
def create_band_list(id_list, md_list, pp_list, tsbi_list): # Check number of list elements idl = len(id_list) mdl = len(md_list) ppl = len(pp_list) tsbil = len(tsbi_list) # Check case # should be all equal OR exactly 1 of md, pp, and tsbi all_equal = idl == mdl == ppl == tsbil #pr...
[ "def create_mask_bands(tileset_id_list, band_ids_list=None):\n\n if band_ids_list:\n out = [{\"tileset_id\": tmp1, \"band_ids\": tmp2}\n for tmp1, tmp2 in zip(tileset_id_list, band_ids_list)]\n else:\n out = [{\"tileset_id\": tmp1} for tmp1 in tileset_id_list]\n return(manifest....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a source_list object. Adds a list of uris to the source_list object. uris_list: List of lists of GCS uris. Returns a source_list object.
def create_source_list(uris_list): return(manifest.Sources( [{"uris": manifest.Uris([tmp1])} for tmp1 in uris_list] ))
[ "def process_sources(sources_list):\n sources_results = []\n for sources_item in sources_list:\n id = sources_item.get('id')\n name = sources_item.get('name')\n description = sources_item.get('description')\n url = sources_item.get('url')\n category = sources_item.get('categ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a tilesets_list object. Adds lists of ids and uris to the tilesets_list object.
def create_tilesets_list(uris_list, dt_list, crs_list, id_list=None): if id_list: out = [{"id": manifest.ID(tmp1), "sources": create_source_list(tmp2), "data_type": manifest.DataType(tmp3), "crs": manifest.CRS(tmp4)} for tmp1, tmp2, tmp3, tmp4 ...
[ "def __init__(self, tilesets=None): # noqa: E501 # noqa: E501\n\n self._tilesets = None\n self.discriminator = None\n\n self.tilesets = tilesets", "def tilesets(self, tilesets):\n if tilesets is None:\n raise ValueError(\"Invalid value for `tilesets`, must not be `None`\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a properties dictionary object. Populates and validates a properties object. properties_dict: Flat dictionary of properties; if none is given, an empty dict with common keys is supplied. Returns a properties object populated by properties_dict, or an empty dict with common keys if none is given.
def create_properties_dict(properties_dict=None): if properties_dict: out = manifest.Properties(**properties_dict) else: out = manifest.Properties() return out
[ "def make_dictionary(self, properties):\n\t\tdictionary = {}\n\n\t\tfor prop in properties:\n\t\t\tif type(prop) == tuple:\n\t\t\t\tkey, value = prop\n\t\t\t\tdictionary[key] = value\n\t\t\telse:\n\t\t\t\tdictionary[prop] = self.prop_value(prop)\n\n\t\treturn dictionary", "def props_to_dict(properties):\n ret_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a time object. Converts a timestamp in iso8601 format to seconds from epoch. timestamp: Timestamp (UTC) string in iso8601 format; see the iso8601 package for details. Returns a Timestamp object.
def create_timestamp(timestamp): t = int(iso8601.parse_date(timestamp).timestamp()) #datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S') return(manifest.Timestamp(seconds=t))
[ "def timefromisoformat(s):\n hour, minute, second, microsecond = _parsetime(s)\n return time(hour, minute, second, microsecond)", "def iso2ts(iso):\n\n return tt2ts(time.strptime(iso + \" UTC\", \"%Y-%m-%d %H:%M:%S %Z\"))", "def _scalar_local_sidereal_time(t):\n self.observer.date = Timestam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an ImageManifest object. name: Path string for the earthengine Image asset. id_list: List of tileset id names; these link to band names? e.g., ["band1", "band2"]
def create_image_manifest(name, uris_list, dt_list, crs_list, id_list, md_list, pp_list, tsbi_list, properties_dict, start_time, end_time, footprint=None, pyramiding_policy=None, uri_prefix=...
[ "def create_band_list(id_list, md_list, pp_list, tsbi_list):\n\n # Check number of list elements\n idl = len(id_list)\n mdl = len(md_list)\n ppl = len(pp_list)\n tsbil = len(tsbi_list)\n\n # Check case\n # should be all equal OR exactly 1 of md, pp, and tsbi\n all_equal = idl == mdl == ppl =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to the Redis instance if there's no connection already in the Flask 'g' object.
def get_redis(): if 'redis' not in g: # connect to redis raddr = app.config['REDIS_HOST'] rhost = raddr.split(':')[0] rport = int(raddr.split(':')[-1]) try: g.redis = Redis(host=rhost, port=rport) except ConnectionError as e: err = f"Could not ...
[ "def _connect_to_redis(self):\n self._redis_client = tornadoredis.Client(host='localhost', port=6379)\n self._redis_client.connect()", "def setup_redis_connection(host=\"localhost\"):\n # log.info(\"Module: {} Function: {}\".format(__name__, sys._getframe().f_code.co_name))\n return redis.Redis(ho...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the rows denoted by ``indices`` from the CSR sparse matrix ``mat``.
def delete_rows_csr(self, mat, indices): if not isinstance(mat, sp.csr_matrix): raise ValueError("works only for CSR format -- use .tocsr() first") indices = list(indices) mask = np.ones(mat.shape[0], dtype=bool) mask[indices] = False return mat[mask]
[ "def delete_from_csr(self,mat, row_indices=[], col_indices=[]):\n #if not isinstance(mat, csr_matrix):\n #raise ValueError(\"works only for CSR format -- use .tocsr() first\")\n\n rows = []\n cols = []\n if len(row_indices)>0:\n rows = list(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
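A quick check of the boolean-mask row deletion from delete_rows_csr above on a small matrix, assuming scipy and numpy are installed.

import numpy as np
import scipy.sparse as sp

mat = sp.csr_matrix(np.arange(12).reshape(4, 3))

mask = np.ones(mat.shape[0], dtype=bool)   # keep everything ...
mask[[1, 3]] = False                       # ... except rows 1 and 3

print(mat[mask].toarray())
# [[0 1 2]
#  [6 7 8]]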
This function evolves the two-population model (all model settings are stored in two_pop_velocity). It returns the important parameters of the model.
def two_pop_model_run_ic(x,a_0,time,sig_g,sig_d,v_gas,T,alpha,m_star,V_FRAG,RHO_S,peak_position,E_drift,nogrowth=False):
    #
    # constants
    #
    plotting = 0
    from constants import AU,year
    #
    # some setup
    #
    n_r = len(x)
    n_t = len(time)
    g = ones(n_r)
    K = zeros(n_r)...
[ "def two_pop_model_run(x_1,a_0,timesteps_1,sigma_g_1,sigma_d_1,v_gas_1,T_1,alpha_1,m_star_1,T_COAG_START,V_FRAG,RHO_S,peak_position_1,E_drift,plotting=False,nogrowth=False):\n #\n # constants\n #\n AU = 1.496e13\t\t\t# astronomical unit in cm\n year = 31558149.54e0\t\t# year in s\n #\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function evolves the two-population model (all model settings are stored in two_pop_velocity). It returns the important parameters of the model.
def two_pop_model_run(x_1,a_0,timesteps_1,sigma_g_1,sigma_d_1,v_gas_1,T_1,alpha_1,m_star_1,T_COAG_START,V_FRAG,RHO_S,peak_position_1,E_drift,plotting=False,nogrowth=False):
    #
    # constants
    #
    AU = 1.496e13          # astronomical unit in cm
    year = 31558149.54e0   # year in s
    #
    # some setup
    ...
[ "def two_pop_model_run_ic(x,a_0,time,sig_g,sig_d,v_gas,T,alpha,m_star,V_FRAG,RHO_S,peak_position,E_drift,nogrowth=False):\n #\n # constants\n #\n plotting = 0\n from constants import AU,year\n #\n # some setup\n #\n n_r = len(x)\n n_t = len(time)\n \n g = ones(n_r)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implicit donor-cell advection-diffusion scheme with piecewise constant values
def impl_donorcell_adv_diff_delta(n_x,x,Diff,v,g,h,K,L,flim,u_in,dt,pl,pr,ql,qr,rl,rr,coagulation_method,A,B,C,D):
    D05=zeros(n_x)
    h05=zeros(n_x)
    rhs=zeros(n_x)
    #
    # calculate the arrays at the interfaces
    #
    for i in arange(1,n_x):
        D05[i] = flim[i] * 0.5 * (Diff[i-1] + Diff[i])
        ...
[ "def test_pure_diffusion(neuron_instance):\n\n h, rxd, data, save_path = neuron_instance\n dend = h.Section()\n dend.diam = 2\n dend.nseg = 101\n dend.L = 100\n\n diff_constant = 1\n\n r = rxd.Region([dend])\n ca = rxd.Species(\n r, d=diff_constant, initial=lambda node: 1 if 0.4 < nod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the current operation is an insertion, False otherwise.
def is_insert(data: dict) -> bool:
    try:
        return data["event"]["op"] == "INSERT"
    except (TypeError, KeyError):
        raise_critical_error(
            message="No operation description available, data['op'] key not available.",
            data=data,
            exception_type=KeyError
        )
[ "def is_insertion(ival):\n is_ins = ival.fields[18].endswith('insertion')\n return is_ins", "def is_inserted(self):\n return self.code < -200", "def is_insertion_encodable(basis: Iterable[Perm]) -> bool:\n return InsertionEncodablePerms.is_insertion_encodable_rightmost(\n basis\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
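The payload shape checked by is_insert above appears to be a Hasura-style event trigger body; a hand-made example payload (the values are made up) shows the lookup path.

sample_event = {
    "event": {
        "op": "INSERT",
        "data": {"old": None, "new": {"crash_id": 12345}},   # illustrative values
    }
}

print(sample_event["event"]["op"] == "INSERT")   # True
# with the function above in scope:
# is_insert(sample_event)                  -> True
# is_insert({"event": {"op": "UPDATE"}})   -> False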
Returns the location_id of a location based on directionality and rpt_road_part_id for a mainlane crash, or an empty string otherwise. As a result of the empty-string behavior, this can also be used as a test to see whether an SVRD polygon can be used for a mainlane crash.
def is_crash_nonproper_and_directional(crash_id: int) -> str:
    if not str(crash_id).isdigit():
        return False

    check_nonproper_polygon_query = """
        query find_service_road_location($crashId: Int!) {
          find_service_road_location_for_centerline_crash(args: {input_crash_id: $crashId}) {
            l...
[ "def find_crash_location(crash_id: int) -> Optional[str]:\n if not str(crash_id).isdigit():\n return None\n\n find_location_query = \"\"\"\n query getLocationAssociation($crash_id: Int!) {\n find_location_for_cr3_collision(args: {id: $crash_id}){\n location_id\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the crash is a mainlane, False otherwise.
def is_crash_mainlane(crash_id: int) -> bool:
    if not str(crash_id).isdigit():
        return False

    check_mainlane_query = """
        query findMainLaneCrashCR3($crash_id: Int!) {
          find_cr3_mainlane_crash(args: { cr3_crash_id: $crash_id }){
            crash_id
          }
        ...
[ "def check_crash(self) -> bool:\n # if player crashes into ground\n if self.player_y + PLAYER_HEIGHT >= self.base_y - 1:\n return True\n else:\n player_rect = pygame.Rect(self.player_x, self.player_y,\n PLAYER_WIDTH, PLAYER_HEIGHT)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to retrieve the crash_id from a data dictionary
def get_crash_id(data: dict) -> int:
    try:
        return data["event"]["data"]["new"]["crash_id"]
    except (TypeError, KeyError):
        raise_critical_error(
            message="Unable to parse request body to identify a crash_id",
            data=data
        )
[ "def find_crash_location(crash_id: int) -> Optional[str]:\n if not str(crash_id).isdigit():\n return None\n\n find_location_query = \"\"\"\n query getLocationAssociation($crash_id: Int!) {\n find_location_for_cr3_collision(args: {id: $crash_id}){\n location_id\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns location_id from a data dictionary, or defaults to None
def get_location_id(data: dict) -> Optional[str]:
    try:
        return data["event"]["data"]["new"]["location_id"]
    except (TypeError, KeyError):
        return None
[ "def getLocationID():\n import ConfigParser\n config = ConfigParser.SafeConfigParser()\n config.read(CONFIG_FILE)\n try:\n locationID = config.getint('Headlines', 'locationID')\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n # Default is Rascal Micro home (Boston, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to find the crash_id of the record to be evaluated.
def find_crash_location(crash_id: int) -> Optional[str]:
    if not str(crash_id).isdigit():
        return None

    find_location_query = """
        query getLocationAssociation($crash_id: Int!) {
          find_location_for_cr3_collision(args: {id: $crash_id}){
            location_id
          }
        }
    ...
[ "def get_cr3_location_id(crash_id: int) -> Optional[str]:\n if not str(crash_id).isdigit():\n return None\n\n query_get_location_id = {\n \"query\": \"\"\"\n query getCrashLocationId($crashId:Int!) {\n atd_txdot_crashes(where: {crash_id: {_eq: $crashId}}){\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a graphql query and returns the current location_id for a CR3 crash record.
def get_cr3_location_id(crash_id: int) -> Optional[str]:
    if not str(crash_id).isdigit():
        return None

    query_get_location_id = {
        "query": """
            query getCrashLocationId($crashId:Int!) {
              atd_txdot_crashes(where: {crash_id: {_eq: $crashId}}){
                location_id
                ...
[ "def find_crash_location(crash_id: int) -> Optional[str]:\n if not str(crash_id).isdigit():\n return None\n\n find_location_query = \"\"\"\n query getLocationAssociation($crash_id: Int!) {\n find_location_for_cr3_collision(args: {id: $crash_id}){\n location_id\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the next section/symbol from the map file. Scans through the .map file tracked by the input MapScannerTracker looking for symbols defined in the .map file. Returns the first symbol encountered.
def get_next_map_token(scanner):
    for line in scanner.fh:
        # look for section header
        m = re.search('^([0-9_A-Z]+)' + \
                      '(\s+(0x[0-9a-fA-F]+)\s+(0x[0-9a-fA-F]+))?\s*$', line)
        if m:
            if m.group(2) != None:
                section = MapParser...
[ "def find_symbolic_offset(symbol_name, map_file=None):\n if not map_file:\n raise ValueError(\"Missing map file\")\n\n with open(map_file, 'r') as map:\n for line in map:\n parts = line.split()\n if (parts[0] == symbol_name) and \\\n (len(parts) > 1):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
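A short demonstration of the section-header regular expression from get_next_map_token above, run against made-up .map-style lines: the name group is required, the two hex fields are optional, and lines that do not start with an upper-case name are skipped.

import re

SECTION_RE = re.compile(r'^([0-9_A-Z]+)(\s+(0x[0-9a-fA-F]+)\s+(0x[0-9a-fA-F]+))?\s*$')

for line in ["FLASH  0x08000000  0x00040000", "SRAM", "  .text  0x20000000"]:
    m = SECTION_RE.search(line)
    print(repr(line), "->", m.groups() if m else "no match")
# 'FLASH  0x08000000  0x00040000' -> ('FLASH', '  0x08000000  0x00040000', '0x08000000', '0x00040000')
# 'SRAM' -> ('SRAM', None, None, None)
# '  .text  0x20000000' -> no match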
Parses the next memory region from the XML file. Scans through the .xml file tracked by the input XMLScannerTracker looking for hardware memory regions. Returns the first region found.
def get_next_xml_token(scanner):
    for line in scanner.fh:
        # look for hardware section
        m = re.search('<Hardware>', line)
        if m:
            scanner.in_hw = True
            continue
        # look for hardware section end
        m = re.search('</Hardware>', line)
        if m:
            sca...
[ "def parseMEMORY(filename):\n print \"\\nNow Parsing MEMORY file\"\n ###### Load The dataframe\n df,combo_filename = getPairedEventFiles(filename,cols=[\"Start\",\"Stop\",\"Duration\"])\n\n ###### Find Start and Stop Times\n startTime,stopTime = getStartStopTimes(df)\n\n root = generateXMLHeader(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all fieldnames in the given schema.
def getNames(schema):
    return [f.getName() for f in schema.fields()]
[ "def field_names(self):\r\n return [field[\"name\"] for field in self.fields]", "def get_field_names(self):\n return list(self._fields.keys())", "def get_schema_entities(schema):\n names = set()\n for table_name, cols in schema.items():\n names.add(table_name.lower().replace('_', ' '))\n f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an ordered dictionary, which maps all Schemata names to fields that belong to the Schemata.
def getSchemata(obj):
    schema = obj.Schema()
    schemata = OrderedDict()
    for f in schema.fields():
        sub = schemata.get(f.schemata, WrappedSchemata(name=f.schemata))
        sub.addField(f)
        schemata[f.schemata] = sub.__of__(obj)
    return schemata
[ "def schemata(schema):\r\n return dict((n, Schema.from_attribute(s)) for n, s in schema.items())", "def getSchemataNames(self):\n lst = []\n for f in self.fields():\n if not f.schemata in lst:\n lst.append(f.schemata)\n return lst", "def getSchemataFields(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Schemata object that contains all fields and layers from ``self`` and ``other``.
def __add__(self, other):
    c = Schemata()
    for field in self.fields():
        c.addField(field)
    for field in other.fields():
        c.addField(field)
    return c
[ "def copy(self):\n c = Schemata()\n for field in self.fields():\n c.addField(field.copy())\n return c", "def copy(self):\n c = BasicSchema()\n for field in self.fields():\n c.addField(field.copy())\n # Need to be smarter when joining layers\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a deep copy of this Schemata.
def copy(self):
    c = Schemata()
    for field in self.fields():
        c.addField(field.copy())
    return c
[ "def copy(self) -> \"DataArray\":\n return deepcopy(self)", "def copy(self):\n c = BasicSchema()\n for field in self.fields():\n c.addField(field.copy())\n # Need to be smarter when joining layers\n # and internal props\n c._props.update(self._props)\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of editable fields for the given instance
def editableFields(self, instance, visible_only=False):
    ret = []
    for field in self.fields():
        if field.writeable(instance, debug=False) and \
           (not visible_only or
            field.widget.isVisible(instance, 'edit') != 'invisible'):
            ret.append(fie...
[ "def editable_fields(self):\n self.wait_for_ajax()\n self.wait_for_element_visibility('.u-field-username', 'username is not visible')\n\n fields = ['country', 'language_proficiencies', 'bio']\n return [field for field in fields if self.field_is_editable(field)]", "def viewableFields(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of viewable fields for the given instance
def viewableFields(self, instance):
    return [field for field in self.fields()
            if field.checkPermission('view', instance)]
[ "def editableFields(self, instance, visible_only=False):\n ret = []\n for field in self.fields():\n if field.writeable(instance, debug=False) and \\\n (not visible_only or\n field.widget.isVisible(instance, 'edit') != 'invisible'):\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary that contains a widget for each field, using the field name as key.
def widgets(self):
    widgets = {}
    for f in self.fields():
        widgets[f.getName()] = f.widget
    return widgets
[ "def widget(self):\n dictionary = {'label': QtWidgets.QLabel(self.label)}\n widget = dictionary['widget'] = QtWidgets.QComboBox()\n for label, choice in zip(self.labels, self.choices):\n widget.addItem(label, choice)\n widget.valueChanged = widget.currentIndexChanged\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a subset of self.fields(), containing only fields that satisfy the given conditions. You can either specify predicates or values or both. If you provide both, all conditions must be satisfied. For each ``predicate`` (positional argument), ``predicate(field)`` must return 1 for a Field ``field`` to be returned a...
def filterFields(self, *predicates, **values):
    results = []
    for field in self.fields():  # step through each of my fields
        # predicate failed:
        failed = [pred for pred in predicates if not pred(field)]
        if failed:
            continue
        # attribute missing:
        ...
[ "def filter(self, *preds):\n if len(preds) == 0:\n return self\n\n # XXX Could optimize if there's just one pred and it's a\n # function\n parts = []\n flocals = {}\n for i, pred in enumerate(preds):\n if isinstance(pred, collections.Callable):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
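A hedged sketch of how the predicate/keyword filtering in filterFields above could be called; the attribute names are assumptions, since only part of the Archetypes-style field API is visible in this row.

# assuming `schema` is a Schemata instance whose fields expose
# attributes such as `required`, `type` and `searchable`:
required_strings = schema.filterFields(
    lambda f: f.required,    # positional predicate: keep fields where this returns true
    type='string',           # keyword condition: field.type must equal 'string'
)

searchable_fields = schema.filterFields(searchable=True)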
Adds a given field to my dictionary of fields.
def addField(self, field):
    field = aq_base(field)
    self._validateOnAdd(field)
    name = field.getName()
    if name not in self._names:
        self._names.append(name)
    self._fields[name] = field
[ "def add_field(self, field):\n # lots of stuff left, needs to be done here\n if not field.get('name'):\n field['name'] = reduce_to_alphanumeric(unicode(field.get('label')).lower())\n \n if self.validate_field(field):\n self.fields.append(field)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing names of all searchable fields.
def searchable(self):
    return [f.getName() for f in self.fields() if f.searchable]
[ "def list_search_fields(self):", "def field_names(self):\r\n return [field[\"name\"] for field in self.fields]", "def get_field_names(self):\n return list(self._fields.keys())", "def getfieldList(self):\n res = []\n ignored_fields = [\"_root_\", \"_version_\", ]\n fields = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }