Dataset columns:
- query: string (lengths 9 to 9.05k)
- document: string (lengths 10 to 222k)
- negatives: list (lengths 19 to 20)
- metadata: dict
Try to add a video to the list. If enough size is available, add it, update the available size, and return True. If not, return False.
def add_video(self, video_id, video_size):
    if self.available_size >= video_size:
        if video_id not in self.videos:
            self.videos.append(video_id)
        self.available_size -= video_size
        return True
    return False
[ "def _insert_new_video(self, video):\n self.__cachedb[video['idVideo']] = video\n self.__cache_size += video['size']\n self.__cache_fifo.append(video['idVideo'])", "def _make_space_for_new_video(self, video=None, size=None):\n vsize = size\n if video != None:\n vsize ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encodes a binary mask using the RLE format.
def encode_mask(mask, dtype=np.uint8):
    if mask.dtype != dtype:
        mask = mask.astype(dtype)
    rle = mask_utils.encode(np.asfortranarray(mask, dtype=dtype))
    rle["counts"] = six.ensure_str(rle["counts"])
    return rle
[ "def encode_binary_mask(mask):\n\n # convert input mask to expected COCO API input --\n mask_to_encode = np.expand_dims(mask, axis=2)\n mask_to_encode = np.asfortranarray(mask_to_encode)\n\n # RLE encode mask --\n encoded_mask = coco_mask.encode(mask_to_encode)[0][\"counts\"]\n\n # compress and ba...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes area of an encoded mask.
def area_from_encoded_mask(encoded_mask):
    return mask_utils.area(encoded_mask)
[ "def calc_area_using_mask(mask):\n return int(np.sum(mask)/255) # all black pixels are 255 instead of 1", "def calc_area_element(img):#not precise unless a txt file with actual counts is loaded\n return int(np.sum(img))", "def compute_area(self) -> None:\n for ann in tqdm(\n self.anns.va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes bounding box from an encoded mask.
def bbox_from_encoded_mask(encoded_mask):
    return mask_utils.toBbox(encoded_mask)
[ "def get_mask_bounding_box(self):\r\n try:\r\n self.mask\r\n if self.mask_xy_dim == self.image_xy_dim:\r\n x, y, w, h = self.calculate_bounding_box()\r\n self.mask_bounding_box = {}\r\n self.mask_bounding_box.update({\r\n '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Elastic deformation of images as described in [Simard2003]_ (with modifications). .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for Convolutional Neural Networks applied to Visual Document Analysis", in Proc. of the International Conference on Document Analysis and Recognition, 2003.
def _elastic(image, elastic_blurshape, alpha=None, sigma=None):
    if alpha == None:
        alpha = image.shape[0] * random.uniform(0.5, 2)
    if sigma == None:
        sigma = int(image.shape[0] * random.uniform(0.5, 1))
    shape = image.shape[:2]
    dx, dy = [cv2.GaussianBlur((elastic_blurshape * 2 - 1) * al...
[ "def runElasticNet():\n X,y=preprocess()\n ElasticNet(X,y)", "def elastic_transform(image, alpha, sigma):\n\n alpha = 300\n sigma = 8\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper shorthand to set symmetric distance matrix weights. This makes the D manipulation code easier to read and less error-prone to write.
def set_weight(D, n1, n2, wt):
    D[n1, n2] = wt
    D[n2, n1] = wt
[ "def setConnectionsWeights(self, newWeights):\n matrix = self.connectionsMatrix\n assert len(matrix) == len(newWeights), 'Dimension missmatch in setConnectionsWeights! %s | %s' % (len(matrix), len(newWeights))\n\n for line, lineWeights in zip(matrix, newWeights):\n assert len(line) =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initialize the parcel manager
def __init__(self, agent=None, region=None, settings=None, message_handler=None, events_handler=None):
    super(ParcelManager, self).__init__(agent, settings)
    self.region = region
    self.message_handler = message_handler  # otherwise, let's just use our own
    # unused atm
    ...
[ "def _setup_cell_manager(self):\n self.cm = CellManager(arrays_to_bin=self.arrays,\n min_cell_size=self.block_size,\n max_cell_size=self.block_size,\n initialize=True)", "def initialize(self, particles):\n self.pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
enable the callback handlers for this ParcelManager
def enable_callbacks(self):
    if self.message_handler == None:
        self.message_handler = MessageHandler()
    self.onParcelOverlay_received = self.message_handler.register('ParcelOverlay')
    self.onParcelOverlay_received.subscribe(self.onParcelOverlay)
    self.onParcelProperties_received...
[ "def register_handler(self, protocol, handler, send_enable=True):\n self.handlers[protocol] = handler\n if send_enable:\n fields = get_csv_args(handler)\n if not fields:\n fields = '*'\n self.cmd('ENABLE', protocol, fields)", "def setEnableCallback(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse and handle an incoming ParcelOverlay packet. Currently, we store this data in the ParcelManager.parcel_overlay dictionary as parcel_overlay[sequence_id] = data (unparsed binary)
def onParcelOverlay(self, packet):
    # unpack the data
    sequence_id = packet['ParcelData'][0]['SequenceID']
    data = packet['ParcelData'][0]['Data']
    # store the data
    # ToDo: make sense of the binary blob in data
    self.parcel_overlay[sequence_id] = data
[ "def _read_para_overlay_id(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument\n _olid = self._read_unpack(clen)\n\n overlay_id = dict(\n type=desc,\n critical=cbit,\n length=clen,\n id=_olid,\n )\n\n _plen = l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse and handle an incoming ParcelProperties packet. Parse and serialize the info into a Parcel() representation, then store it (or replace the stored version)
def onParcelProperties(self, packet):
    parcel_info = {}
    parcel_info['RequestResult'] = packet['ParcelData'][0]['RequestResult']
    parcel_info['SequenceID'] = packet['ParcelData'][0]['SequenceID']
    parcel_info['SnapSelection'] = packet['ParcelData'][0]['SnapSelection']
    parcel_inf...
[ "def onParcelPropertiesUpdate(self, packet):\n\n parcel_update = {}\n\n parcel_update['LocalID'] = packet['ParcelData'][0]['LocalID']\n parcel_update['Flags'] = packet['ParcelData'][0]['Flags']\n parcel_update['ParcelFlags'] = packet['ParcelData'][0]['ParcelFlags']\n parcel_update...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse and handle an incoming ParcelPropertiesUpdate packet. parse the data into a dictionary and pass the blob to the Parcel() instance for self handling
def onParcelPropertiesUpdate(self, packet):
    parcel_update = {}
    parcel_update['LocalID'] = packet['ParcelData'][0]['LocalID']
    parcel_update['Flags'] = packet['ParcelData'][0]['Flags']
    parcel_update['ParcelFlags'] = packet['ParcelData'][0]['ParcelFlags']
    parcel_update['SalePrice'...
[ "def onParcelProperties(self, packet):\n\n parcel_info = {}\n\n parcel_info['RequestResult'] = packet['ParcelData'][0]['RequestResult']\n parcel_info['SequenceID'] = packet['ParcelData'][0]['SequenceID']\n parcel_info['SnapSelection'] = packet['ParcelData'][0]['SnapSelection']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
store a representation of a parcel
def _store_parcel_properties(self, parcel_info):
    # update the attributes of an existing parcel list member, else, append
    index = [self.parcels.index(parcel) for parcel in self.parcels
             if parcel.LocalID == parcel_info['LocalID']]
    if index != []:
        self._update_parcel_properties(parc...
[ "def _store(self):\n self.logger.debug('Starting to store the data...')\n mapping = {}\n prefix = self.config['spotify']['output']['prefix']\n for label in self.df['label'].unique():\n if label == label: #check for nan, nan's are not equal to itself\n mapping[in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update a stored parcel's properties. finds the stored parcel and passes it a dictionary to process
def _update_parcel_properties(self, parcel_properties):
    parcels_found = []
    if parcel_properties.has_key('LocalID'):
        LocalID = parcel_properties['LocalID']
        parcels_found = [parcel for parcel in self.parcels
                         if str(parcel.LocalID) == str(LocalID)]
        if len(parcels_fo...
[ "def _update_properties(self, parcel_properties):\n\n for attribute in parcel_properties:\n\n # if self.settings.LOG_VERBOSE: logger.debug(\"Updating parcel data for %s. %s = %s\" % (self, attribute, parcel_properties[attribute]))\n\n setattr(self, attribute, parcel_properties[attribute...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the parcel's bitmap to update the manager's (x,y) to LocalID mapping
def _update_parcel_map(self, parcel):
    full = True
    for x in range(64):
        for y in range(64):
            index = x + (64 * y)
            byte = index >> 3
            mask = 1 << (index % 8)
            # *TODO: Bitmap should be stored as a byte array, not a string
            ...
[ "def get_parcel_id_by_location(self, local_x, local_y):\n return self.parcel_map[ int(local_x)/4 ][ int(local_y)/4 ]", "def _get_new_id(self, *position) -> int:\n return self.canvas.create_bitmap(*position)", "def _get_new_id(self, *position) -> int:\n return self.canvas.create_image(*posit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a parcel's local id if info has been received, 0 otherwise.
def get_parcel_id_by_location(self, local_x, local_y):
    return self.parcel_map[int(local_x)/4][int(local_y)/4]
[ "def _get_local_machine_id(self):\n res = 0\n for ID, data in self._server_namebook.items():\n machine_id = data[0]\n ip = data[1]\n if ip in self._local_ip4_addr_list():\n res = machine_id\n break\n\n return res", "def lpar_id(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the agent's current parcel if info has been received, None otherwise.
def get_current_parcel(self):
    return self.get_parcel_by_location(self.agent.Position.X, self.agent.Position.Y)
[ "def getCurrentMission(self) -> Optional[Mission]:\n argosMission = self.argosController.getCurrentMission()\n if argosMission is not None:\n return argosMission\n return self.crazyradioController.getCurrentMission()", "def current(self) -> SupervisorJob:\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
request the estate covenant (for the current estate)
def request_estate_covenant(self):
    self.onEstateCovenantReply_received = self.message_handler.register('EstateCovenantReply')
    self.onEstateCovenantReply_received.subscribe(self.onEstateCovenantReply)
    self.sendEstateCovenantRequest(self.agent.agent_id, self.agent.session_id)
[ "def GetEn1Covenant(self):\r\n print(\"/\")", "def GetSelfCovenant(self):\r\n print(\"/\")", "def sendEstateCovenantRequest(self, agent_id, session_id):\n\n packet = Message('EstateCovenantRequest',\n Block('AgentData',\n AgentID = agent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send an EstateCovenantRequest message to the host simulator
def sendEstateCovenantRequest(self, agent_id, session_id):
    packet = Message('EstateCovenantRequest',
                     Block('AgentData',
                           AgentID = agent_id,
                           SessionID = session_id))
    self.region.enqueue_message(packet)
[ "def request_estate_covenant(self, ):\n\n self.onEstateCovenantReply_received = self.message_handler.register('EstateCovenantReply')\n self.onEstateCovenantReply_received.subscribe(self.onEstateCovenantReply)\n\n self.sendEstateCovenantRequest(self.agent.agent_id, self.agent.session_id)", "as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse and handle an EstateCovenantReply packet
def onEstateCovenantReply(self, packet):
    try:
        self.onEstateCovenantReply_received.unsubscribe(self.onEstateCovenantReply)
    except AttributeError:
        pass
    CovenantID = packet['Data'][0]['CovenantID']
    CovenantTimestamp = packet['Data'][0]['CovenantTimestamp']
    ...
[ "def parseRtspReply(self, data):\r\n\t\t#TODO\r\n\t\trequest = data.split('\\n')\r\n\t\tline1 = request[1].split(' ')\r\n\t\tseqNum = int(line1[1])\r\n \r\n\t\tif seqNum == self.rtspSeq:\r\n\t\t\tline2 = request[2].split(' ')\r\n\t\t\tself.sessionId = int(line2[1])\t\r\n\r\n\t\tline0 = request[0].split(' ')\r\n\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sends a ParcelPropertiesRequest message to the host simulator
def sendParcelPropertiesRequest(self, agent_id, session_id, SequenceID, West, South, East, North, SnapSelection):
    packet = Message('ParcelPropertiesRequest',
                     Block('AgentData',
                           AgentID = agent_id,
                           SessionID = session_id),
                     ...
[ "def onParcelProperties(self, packet):\n\n parcel_info = {}\n\n parcel_info['RequestResult'] = packet['ParcelData'][0]['RequestResult']\n parcel_info['SequenceID'] = packet['ParcelData'][0]['SequenceID']\n parcel_info['SnapSelection'] = packet['ParcelData'][0]['SnapSelection']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sends a ParcelPropertiesRequestByID packet
def sendParcelPropertiesRequestByID(self, agent_id, session_id, SequenceID, LocalID):
    packet = Message('ParcelPropertiesRequestByID',
                     Block('AgentData',
                           AgentID = agent_id,
                           SessionID = session_id),
                     ...
[ "def onParcelProperties(self, packet):\n\n parcel_info = {}\n\n parcel_info['RequestResult'] = packet['ParcelData'][0]['RequestResult']\n parcel_info['SequenceID'] = packet['ParcelData'][0]['SequenceID']\n parcel_info['SnapSelection'] = packet['ParcelData'][0]['SnapSelection']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a ParcelInfoRequest packet for the specified parcel_id
def sendParcelInfoRequest(self, agent_id, session_id, parcel_id):
    packet = Message('ParcelInfoRequest',
                     Block('AgentData',
                           AgentID = agent_id,
                           SessionID = session_id),
                     Block('Data',
                           ...
[ "def onParcelInfoReply(self, packet):\n\n parcel_info = {}\n\n parcel_info['ParcelID'] = packet['Data'][0]['ParcelID']\n parcel_info['OwnerID'] = packet['Data'][0]['OwnerID']\n parcel_info['Name'] = packet['Data'][0]['Name']\n parcel_info['Desc'] = packet['Data'][0]['Desc']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse and handle a ParcelInfoReply packet
def onParcelInfoReply(self, packet):
    parcel_info = {}
    parcel_info['ParcelID'] = packet['Data'][0]['ParcelID']
    parcel_info['OwnerID'] = packet['Data'][0]['OwnerID']
    parcel_info['Name'] = packet['Data'][0]['Name']
    parcel_info['Desc'] = packet['Data'][0]['Desc']
    parcel_inf...
[ "def parseRtspReply(self, data):\r\n\t\t#TODO\r\n\t\trequest = data.split('\\n')\r\n\t\tline1 = request[1].split(' ')\r\n\t\tseqNum = int(line1[1])\r\n \r\n\t\tif seqNum == self.rtspSeq:\r\n\t\t\tline2 = request[2].split(' ')\r\n\t\t\tself.sessionId = int(line2[1])\t\r\n\r\n\t\tline0 = request[0].split(' ')\r\n\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
request the properties of all of the parcels on the current region. The delay parameter is the sleep between sending each packet request; if refresh is True, current data is discarded before requesting. If refresh is not True, data is not re-requested for region locations already queried.
def request_all_parcel_properties(self, delay=0.5, refresh=False):
    # spawn a coroutine so this is non blocking
    eventlet.spawn(self.__request_all_parcel_properties, delay, refresh)
[ "def __request_all_parcel_properties(self, delay = 1, refresh = False):\n\n if refresh:\n self.parcel_map = [[0 for _ in range(64)] for _ in range(64)]\n self.parcel_map_full = False\n\n # minimum parcel size is 4x4m (16sq)\n # ugh this is a wretched way to request parcel ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
request the properties of all of the parcels on the current region
def __request_all_parcel_properties(self, delay=1, refresh=False):
    if refresh:
        self.parcel_map = [[0 for _ in range(64)] for _ in range(64)]
        self.parcel_map_full = False
    # minimum parcel size is 4x4m (16sq)
    # ugh this is a wretched way to request parcel info, but it...
[ "def request_all_parcel_properties(self, delay = 0.5, refresh = False):\n\n # spawn a coroutine so this is non blocking\n eventlet.spawn(self.__request_all_parcel_properties, delay, refresh)", "def proteins(self):\n return self._regions.keys()", "def extract_properties(labels, image):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the specified objects for the specified parcel
def return_parcel_objects(self):
    pass
    '''
    // ParcelReturnObjects
    // viewer -> sim
    // reliable
    {
        ParcelReturnObjects Low 199 NotTrusted Zerocoded
        {
            AgentData Single
            { AgentID LLUUID }
            { SessionID LLUUID }
        }
        ...
[ "def range_reduction_get_objects(self):\n raise NotImplementedError() # pragma:nocover", "def select_objects(self, ):\n\n pass\n\n '''\n // ParcelSelectObjects\n // viewer -> sim\n // reliable\n {\n \tParcelSelectObjects Low 202 NotTrusted Zerocoded\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set objects nonphysical and disable scripts for the specified parcel
def disable_objects(self):
    pass
    '''
    // Disable makes objects nonphysical and turns off their scripts.
    // ParcelDisableObjects
    // viewer -> sim
    // reliable
    {
        ParcelDisableObjects Low 201 NotTrusted Zerocoded
        {
            AgentData Single
            ...
[ "def disable_objects(self, ):\n\n pass\n\n '''\n // Disable makes objects nonphysical and turns off their scripts.\n // ParcelDisableObjects\n // viewer -> sim\n // reliable\n {\n \tParcelDisableObjects Low 201 NotTrusted Zerocoded\n \t{\n \t\tAg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a ParcelDisableObjects packet
def sendParcelDisableObjects(self):
    pass
    '''
    // Disable makes objects nonphysical and turns off their scripts.
    // ParcelDisableObjects
    // viewer -> sim
    // reliable
    {
        ParcelDisableObjects Low 201 NotTrusted Zerocoded
        {
            AgentData Si...
[ "def disable_objects(self, ):\n\n pass\n\n '''\n // Disable makes objects nonphysical and turns off their scripts.\n // ParcelDisableObjects\n // viewer -> sim\n // reliable\n {\n \tParcelDisableObjects Low 201 NotTrusted Zerocoded\n \t{\n \t\tAg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
joins the specified parcels
def join_parcels(self):
    pass
    '''
    // ParcelJoin - Take all parcels which are owned by agent and inside
    // rectangle, and make them 1 parcel if they all are leased.
    // viewer -> sim
    // reliable
    {
        ParcelJoin Low 210 NotTrusted Unencoded
        {
            ...
[ "def sendParcelJoin(self, ):\n\n pass\n\n '''\n // ParcelJoin - Take all parcels which are owned by agent and inside\n // rectangle, and make them 1 parcel if they all are leased.\n // viewer -> sim\n // reliable\n {\n \tParcelJoin Low 210 NotTrusted Unencoded...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a ParcelJoin packet
def sendParcelJoin(self):
    pass
    '''
    // ParcelJoin - Take all parcels which are owned by agent and inside
    // rectangle, and make them 1 parcel if they all are leased.
    // viewer -> sim
    // reliable
    {
        ParcelJoin Low 210 NotTrusted Unencoded
        {
            ...
[ "def join_parcels(self, ):\n\n pass\n\n '''\n // ParcelJoin - Take all parcels which are owned by agent and inside\n // rectangle, and make them 1 parcel if they all are leased.\n // viewer -> sim\n // reliable\n {\n \tParcelJoin Low 210 NotTrusted Unencoded\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
divide the selection into a new parcel
def divide_parcel(self):
    pass
    '''
    // ParcelDivide
    // If the selection is a subsection of exactly one parcel,
    // chop out that section and make a new parcel of it.
    // viewer -> sim
    // reliable
    {
        ParcelDivide Low 211 NotTrusted Unencoded
        ...
[ "def coarsenSubdivSelectionList():\n pass", "def sendParcelDivide(self, ):\n\n pass\n\n '''\n // ParcelDivide\n // If the selection is a subsection of exactly one parcel,\n // chop out that section and make a new parcel of it.\n // viewer -> sim\n // reliable\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a ParcelDivide packet
def sendParcelDivide(self):
    pass
    '''
    // ParcelDivide
    // If the selection is a subsection of exactly one parcel,
    // chop out that section and make a new parcel of it.
    // viewer -> sim
    // reliable
    {
        ParcelDivide Low 211 NotTrusted Unencoded
        ...
[ "def divide_parcel(self, ):\n\n pass\n\n '''\n // ParcelDivide\n // If the selection is a subsection of exactly one parcel,\n // chop out that section and make a new parcel of it.\n // viewer -> sim\n // reliable\n {\n \tParcelDivide Low 211 NotTrusted ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
request an access list for the specified parcel, while enabling a callback handler for the response
def request_parcel_access_list(self, LocalID, Flags):
    self.onParcelAccessListReply_received = self.message_handler.register('ParcelAccessListReply')
    self.onParcelAccessListReply_received.subscribe(self.onParcelAccessListReply, LocalID = LocalID)
    self.sendParcelAccessListRequest(self.agent.agen...
[ "async def has_access_handler(request: aiohttp.web.Request) -> aiohttp.web.Response:\n access_list = []\n access_list = await request.app[\"db_conn\"].get_access_list(request.match_info[\"user\"])\n\n MODULE_LOGGER.log(\n logging.DEBUG, \"Returning following access list: %s\", str(access_list)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a ParcelAccessListRequest packet to the host simulator
def sendParcelAccessListRequest(self, agent_id, session_id, LocalID, Flags, SequenceID=-5150):
    packet = Message('ParcelAccessListRequest',
                     Block('AgentData',
                           AgentID = agent_id,
                           SessionID = session_id),
                     ...
[ "def request_parcel_access_list(self, LocalID, Flags):\n\n self.onParcelAccessListReply_received = self.message_handler.register('ParcelAccessListReply')\n self.onParcelAccessListReply_received.subscribe(self.onParcelAccessListReply, LocalID = LocalID)\n\n self.sendParcelAccessListRequest(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse and handle a ParcelAccessListReply packet
def onParcelAccessListReply(self, packet):
    #self.onParcelAccessListReply_received.unsubscribe(self.onParcelAccessListReply, LocalID = LocalID)
    raise NotImplementedError("sendFetchInventoryDescendentsRequest")
    '''
    // sim -> viewer
    // ParcelAccessListReply
    {
        ParcelAc...
[ "def parseRtspReply(self, data):\r\n\t\t#TODO\r\n\t\trequest = data.split('\\n')\r\n\t\tline1 = request[1].split(' ')\r\n\t\tseqNum = int(line1[1])\r\n \r\n\t\tif seqNum == self.rtspSeq:\r\n\t\t\tline2 = request[2].split(' ')\r\n\t\t\tself.sessionId = int(line2[1])\t\r\n\r\n\t\tline0 = request[0].split(' ')\r\n\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
request dwell for the specified parcel, while enabling a callback handler for the response
def request_parcel_dwell(self, LocalID):
    self.onParcelDwellReply_received = self.message_handler.register('ParcelDwellReply')
    self.onParcelDwellReply_received.subscribe(self.onParcelDwellReply, LocalID = LocalID)
    self.sendParcelDwellRequest(self.agent.agent_id, self.agent.session_id, LocalID)
[ "def StreamExecuteKeyRanges(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def emit_until_response(self, event_name, **kwargs):\n ...", "def combo_callback(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a ParcelDwellRequest packet
def sendParcelDwellRequest(self, agent_id, session_id, LocalID):
    packet = Message('ParcelDwellRequest',
                     Block('AgentData',
                           AgentID = agent_id,
                           SessionID = session_id),
                     Block('Data',
                           ...
[ "def request_parcel_dwell(self, LocalID):\n\n self.onParcelDwellReply_received = self.message_handler.register('ParcelDwellReply')\n self.onParcelDwellReply_received.subscribe(self.onParcelDwellReply, LocalID = LocalID)\n\n self.sendParcelDwellRequest(self.agent.agent_id, self.agent.session_id,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update a parcel's properties via a dictionary
def _update_properties(self, parcel_properties):
    for attribute in parcel_properties:
        # if self.settings.LOG_VERBOSE: logger.debug("Updating parcel data for %s. %s = %s" % (self, attribute, parcel_properties[attribute]))
        setattr(self, attribute, parcel_properties[attribute])
[ "def _update_parcel_properties(self, parcel_properties):\n\n parcels_found = []\n\n if parcel_properties.has_key('LocalID'):\n\n LocalID = parcel_properties['LocalID']\n\n parcels_found = [parcel for parcel in self.parcels if str(parcel.LocalID) == str(LocalID)]\n\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sends a SetOtherCleanTime packet for this parcel
def set_other_clean_time(self):
    pass
    '''
    // ParcelSetOtherCleanTime
    // viewer -> sim
    // reliable
    {
        ParcelSetOtherCleanTime Low 200 NotTrusted Zerocoded
        {
            AgentData Single
            { AgentID LLUUID }
            { SessionID LLUUID }
            ...
[ "def contracted_time(self, contracted_time):\n if contracted_time is None:\n raise ValueError(\"Invalid value for `contracted_time`, must not be `None`\")\n\n self._contracted_time = contracted_time", "def coalescing_time(self, coalescing_time):\n\n self._coalescing_time = coalesci...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set objects nonphysical and disable scripts for this parcel
def disable_objects(self):
    pass
    '''
    // Disable makes objects nonphysical and turns off their scripts.
    // ParcelDisableObjects
    // viewer -> sim
    // reliable
    {
        ParcelDisableObjects Low 201 NotTrusted Zerocoded
        {
            AgentData Single
            ...
[ "def disable_objects(self, ):\n\n pass\n\n '''\n // Disable makes objects nonphysical and turns off their scripts.\n // ParcelDisableObjects\n // viewer -> sim\n // reliable\n {\n \tParcelDisableObjects Low 201 NotTrusted Zerocoded\n \t{\n \t\tAg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
selects the specified objects for this parcel
def select_objects(self):
    pass
    '''
    // ParcelSelectObjects
    // viewer -> sim
    // reliable
    {
        ParcelSelectObjects Low 202 NotTrusted Zerocoded
        {
            AgentData Single
            { AgentID LLUUID }
            { SessionID LLUUID }
        }
        ...
[ "def select(self, obj_list):\n pos = obj_list[0].matrix[3]\n screen_coordinates = self.active_camera.world_to_screen(pos)\n self.add_object(\"ObjectInfo\", object_box(screen_coordinates[0], screen_coordinates[1], obj_list[0]))", "def select_object(obj):\n\n unselect_all()\n \n bpy.co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
deed this parcel to a group
def deed_to_group(self):
    pass
    '''
    // ParcelDeedToGroup - deed a patch of land to a group
    // viewer -> sim
    // reliable
    {
        ParcelDeedToGroup Low 207 NotTrusted Unencoded
        {
            AgentData Single
            { AgentID LLUUID }
            { SessionID...
[ "def delete_group(self,iSurveyID,iGroupID):", "def deleteGroup(id):", "def release_object_group(self):\n raise NotImplementedError()", "def deleted(self, group, **payload):\n pass", "def move_group_backward(self, group):\n self._move_group_backward(group.encode())", "def move_group_to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
release this parcel to the public
def release(self):
    pass
    '''
    // ParcelRelease
    // Release a parcel to public
    // viewer -> sim
    // reliable
    {
        ParcelRelease Low 212 NotTrusted Unencoded
        {
            AgentData Single
            { AgentID LLUUID }
            { SessionID LLUUID }
            ...
[ "def release(self):\n self.data_lock.r_release()", "def release(self):\n if self._lock.locked():\n self._lock.release()", "def release(self):\n self.free = True\n self.guest = None\n self.occupy_time = None", "def release(self):\n # type: () -> None\n if self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the hunt, in the paused state.
def Start(self):
    # Anyone can create the hunt but it will be created in the paused
    # state. Permissions are required to actually start it.
    with implementation.StartHunt(
            runner_args=self.args.hunt_runner_args,
            args=self.args.hunt_args,
            token=self.token) as hunt_obj:
        # Nothing ...
[ "def Start(self):\n hunt = implementation.GRRHunt.StartHunt(\n \"GenericHunt\",\n flow_name=self.state.hunt_flow_name,\n args=self.state.hunt_flow_args,\n expiry_time=self.state.expiry_time,\n client_limit=self.state.client_limit,\n output_plugins=self.state.output_plugi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the hunt and run it.
def Start(self):
    with implementation.StartHunt(
            runner_args=self.args.hunt_runner_args,
            args=self.args.hunt_args,
            token=self.token) as hunt_obj:
        hunt_obj.Run()
        self.Log("User %s created a new %s hunt (%s)",
                 self.token.username, hunt_obj.args.flow_runner_args.flow...
[ "def Start(self):\n hunt = implementation.GRRHunt.StartHunt(\n \"GenericHunt\",\n flow_name=self.state.hunt_flow_name,\n args=self.state.hunt_flow_args,\n expiry_time=self.state.expiry_time,\n client_limit=self.state.client_limit,\n output_plugins=self.state.output_plugi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates nominal edges and centers for logarithmic radial bins (base-10 logarithm). Edges and areas are exact; "center" values are estimated as the circumference-weighted mean radius.
def radial_bins(rmin, rmax, nbin):
    edges = np.logspace(math.log10(rmin), math.log10(rmax), nbin + 1, endpoint=True)
    cens = np.array([(edges[i + 1] ** 3. - edges[i] ** 3.) * 2. / 3. / (edges[i + 1] ** 2. - edges[i] ** 2.)
                     for i, edge in enumerate(...
[ "def radial_data(data,annulus_width=1,working_mask=None, weight = None, x=None,y=None,rmax=None):\n \n# 2012-02-25 20:40 IJMC: Empty bins now have numel=0, not nan.\n# 2012-02-04 17:41 IJMC: Added \"SUM\" flag\n# 2010-11-19 16:36 IJC: Updated documentation for Sphinx\n# 2010-03-10 19:22 IJC: Ported to python fro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assigns the dtypes to the flattened array
def flat_type(recarr):
    newtype = []
    for dt in recarr.dtype.descr:
        if len(dt) == 3:
            for i in np.arange(dt[2][0]):
                newtype.append((dt[0] + '_' + str(i), dt[1]))
        else:
            newtype.append(dt)
    return newtype
[ "def astype(array, dtype):\n\tpass", "def infer_array_dtype(array: SymbolicArray) -> DTypeLike:\n if all(el.is_integer for el in array.flat):\n return np.int64\n elif all(el.is_real for el in array.flat):\n return np.float64\n elif all(el.is_complex for el in array.flat):\n return np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies the record array into a new recarray which has only 1D columns
def flat_copy(recarr):
    newtype = flat_type(recarr)
    newarr = np.zeros(len(recarr), dtype=newtype)
    oldnames = recarr.dtype.names
    j = 0
    for i, dt in enumerate(recarr.dtype.descr):
        if len(dt) == 3:
            for c in np.arange(dt[2][0]):
                # print newtype[j]
                ...
[ "def stretch(arr, col_names, asrecarray=True):\n dt = []\n has_array_field = False\n has_scalar_filed = False\n first_array = None\n\n # Construct dtype\n for c in col_names:\n if _is_array_field(arr, c):\n dt.append((c, arr[c][0].dtype))\n has_array_field = True\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get entries by start index and batch size.
def get_entries(start_index, batch_size=10, **kwargs):
    extra = None
    if kwargs and kwargs.has_key('tag'):
        tag = kwargs.pop('tag')
    else:
        tag = None
    if start_index is None:
        if tag is None:
            entries = Entry.gql(
                'ORDER BY index DESC').fetch(
                ...
[ "def batch( qs, batch_size=1000 ):\n total = qs.count()\n for start in range( 0, total, batch_size ):\n end = min( start + batch_size, total )\n yield ( start, end, total, qs[ start:end ] )", "def get_batch(self, batch_size):\n\n if batch_size < len(self.train_set) :\n img_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the highscore text file with the new highscore.
def update_score(highscore, highscore_filename):
    with open(highscore_filename, "w") as highscore_file:
        highscore_file.write("human {}\nai {}".format(highscore[0], highscore[1]))
[ "def update_score(self):\n try:\n with open(self.filename, \"r+\") as f:\n contents = f.readlines()\n except:\n with open(self.filename, \"w\") as fn:\n fn.write(\"\" + self.name + \" \" + str(self.score) + \"\\n\")\n return\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the agent parameters to a JSON file.
def save_agent(agent, out_filename):
    with open(out_filename, "w") as out_file:
        json.dump(agent.mdp_data, out_file)
    print("The AI agent has been saved to: {}".format(out_filename))
[ "def save(self):\r\n try:\r\n with open(self.json_name(), \"w\") as json_file:\r\n json_str = dumps(self.values)\r\n json_file.write(json_str)\r\n except:\r\n print(\"Error: Writing data to file failed\")", "def save_calibration_parameters(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
try to retrieve the user from the sessions table; return the usernick, or None if no valid session is present
def session_user(db):
    if bottle.request.get_cookie(COOKIE_NAME) != '' and bottle.request.get_cookie(COOKIE_NAME) != None:
        cur = db.cursor()
        # retrieve user sessionid and usernick (username) from the sessions table
        rows = [row[0] for row in cur.execute("SELECT sessionid, usernick FROM sessions...
[ "def session_user(db):\n cursor = db.cursor()\n sql = \"SELECT user FROM sessions WHERE sessionid=?\"\n key = bottle.request.get_cookie(COOKIE_NAME)\n cursor.execute(sql,(key,))\n data = cursor.fetchone()\n if data:\n return data[0]\n return None", "def get_user():\n try:\n u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new SparseVector with the selected entries.
def select(self, keys):
    new = SparseVector()
    for key in keys:
        val = self.get(key)
        if val is not None:
            new[key] = val
    return new
[ "def make_sparse_vector(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def select_vec(self, evecs):", "def scipy2sparse(vec, eps=1e-9):\n vec = vec.tocsr()\n assert vec.shape[0] == 1\n return [(int(pos), float(val)) for pos, val in zip(vec.indices, vec.data) if np.abs(va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the function f(key, val) -> new value, in place
def iapply(self, f):
    for key, val in self.iteritems():
        self[key] = f(key, val)
    return self
[ "def update_mapping(mapping, val, func):\n if val in mapping.keys():\n mapping[val].append(func)\n else:\n mapping[val] = [func]", "def modifiyItems(dic, keyFunction, valueFunction):\n return {keyFunction(key, value): valueFunction(key, value) for key, value in dic.items()}", "def compose...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a random vector with the same keys and dimensions as x
def random(cls, x, sigma=1.0):
    rand = cls()
    for key, val in x.iteritems():
        if isinstance(val, float):
            d = random.normalvariate(0, sigma)
        else:
            d = np.random.normal(scale=sigma, size=x[key].shape)
        rand[key] = d
    return rand
[ "def random_vector():\n X = random.randint(VECTOR_LEN*(-1), VECTOR_LEN)\n Y = random.randint(VECTOR_LEN*(-1), VECTOR_LEN)\n return [X, Y]", "def get_uniform_keys(n_keys, depth, dim, normalized, seed):\n rng = np.random.RandomState(seed)\n bound = 1 / math.sqrt(dim)\n X = rng.uniform(-bound, boun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that finite difference estimates of the slope converge as ||delta|| -> 0. f_grad_x is a sparse vector representing the gradient of f evaluated at x.
def gradient_check(f, f_grad_x, x, direction=None, verbose=False, precision=1e-4):
    if direction is None:
        # initialize random direction
        direction = SparseVector.random(x)
    # normalize to be unit vector
    delta = direction * (1.0 / direction.norm2())
    # compute slope in direction of delta
    ...
[ "def check_grad(fcn,theta0,delta):\n x,dx = fcn(theta0)\n for i in range(len(theta0)):\n theta = theta0.copy()\n theta[i]=theta0[i]+delta\n xp,_ = fcn(theta)\n theta[i]=theta0[i]-delta\n xn,_ = fcn(theta)\n est_grad = (xp-xn)/2/delta\n print('Estimate gradient:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If A and B share at least one grandparent but do not share a parent, return one of the shared grandparents, else return None.
def is_cousin(parent_db, A, B):
    db = {}
    for pair in parent_db:
        if pair[1] not in db:
            db[pair[1]] = [pair[0]]
        else:
            db[pair[1]] += [pair[0]]
    parentsA = db[A]
    parentsB = db[B]
    par = set(parentsA + parentsB)
    if not len(par) == len(parentsA) + len(parentsB):
        return None
    ...
[ "def common_ancestor(parent_list_0, parent_list_1):\n for b in parent_list_0[::-1]:\n if b in parent_list_1:\n return b\n return None", "def common_ancestor(node_a, node_b):\n ancestors_a = ancestors(node_a)\n ancestors_b = ancestors(node_b)\n lowest_ance...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Using production rules from the grammar, expand root into all legal phrases.
def all_phrases(grammar, root):
    print(grammar)
    print(root)
    input()
    pass
[ "def load_grammar(self):\n \n grammar = {}\n processing_expansion = False\n add_as_key = True\n nt = []\n prod = \"\"\n for l in self.read_next_line():\n #utility.debug_print(l , \"line read\")\n if not l: #empty line\n #utility.d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Symbolic tracing API. Given an ``nn.Module`` or function instance ``root``, this function will return a ``ColoGraphModule`` constructed by recording operations seen while tracing through ``root``. With ``meta_args``, we can trace models that are otherwise untraceable due to control flow. If specified using ``meta_args`` on...
def symbolic_trace(
    root: Union[torch.nn.Module, Callable[..., Any]],
    concrete_args: Optional[Dict[str, Any]] = None,
    meta_args: Optional[Dict[str, Any]] = None,
    trace_act_ckpt=False,
) -> ColoGraphModule:
    graph = ColoTracer(trace_act_ckpt=trace_act_ckpt).trace(root, concrete_args=concrete_args, met...
[ "def graphTrackCtx(*args, **kwargs):\n\n pass", "def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable:\n lazy_args = [arg.to(device=\"lazy\") for arg in example_inputs]\n args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args]\n tensor_id_to_arg_idx = {tenso...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will return a compiled regular expression matching barcode values that have a fixed-length serial number field but no parentheses in the application identifiers.
def get_no_parens_numeric_gs1_01_21_optional_17_10(serial_number_length=12):
    pattern = _NO_PARENS_NUMERIC_GS1_01_21_OPTIONAL_17_10.replace(
        '{%serial_number_length%}', str(serial_number_length)
    )
    return re.compile(pattern)
[ "def match_pattern(barcode_val: str, max_serial_number_length=14):\n match = False\n matches = []\n barcode_val = str(barcode_val)\n if barcode_val.startswith('(01)'):\n match = SGTIN_SN_10_13_ALPHA.match(\n barcode_val\n )\n elif barcode_val.startswith('01'):\n if len...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will use the regular expressions in this module to find common barcode components and return the match. For an example of how to use this see the `gs123.conversion.BarcodeConverter._populate` function.
def match_pattern(barcode_val: str, max_serial_number_length=14):
    match = False
    matches = []
    barcode_val = str(barcode_val)
    if barcode_val.startswith('(01)'):
        match = SGTIN_SN_10_13_ALPHA.match(
            barcode_val
        )
    elif barcode_val.startswith('01'):
        if len(barcode_val) ...
[ "def extract_barcode(record, eb):\n seq = record.sequence[eb.start:eb.end]\n qual = record.quality[eb.start:eb.end]\n return (eb.sequence_tag, seq, 'Z'), (eb.quality_tag, qual, 'Z')", "def get_type(self):\n for card, pattern in CARDS.items():\n if pattern.match(self.number):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the categories from the given JSON dict into the given categories dict. Raise value error in case of invalid syntax.
def _extract_cats(cls, d: dict, prefix: str, cat_data: dict):
    if not isinstance(cat_data, dict):
        raise ValueError("Dict expected.")
    if prefix != "":
        prefix += cls.SEPARATOR_CATS
    for key, value in cat_data.items():
        if not isinstance(key, str):
            ...
[ "def format_categories(cls, results, extra=None):\n response = {}\n response = results\n\n categoryDict = defaultdict(list)\n\n bindings = results['results']['bindings']\n for binding in bindings:\n category = binding[\"Category\"]['value']\n subcategory = bi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all nodes connected to (start_x, start_y), and whether this region is surrounded.
def connectedComponent(start_x, start_y):
    direc = [(1, 0), (-1, 0), (0, 1), (0, -1)]
    nodes_visited = set()
    is_surrounded = True
    stack = [(start_x, start_y)]
    while stack:
        curr_x, curr_y = stack.pop()
        nodes_visited.add((curr_x, cu...
[ "def is_connected(self, start: Tuple[float, float]) -> bool:\n return self._map.is_connected(start)", "def get_connected_nodes(self) -> set:\n return self.connected_nodes", "def findEdges(self):\n for nc in self.nodes:\n x = nc[0]\n y = nc[1]\n nc_neighbours...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the Dividend Yield based on the given market price. This is one of the endpoints.
def calculate_dividend_yield(self, market_price):
    if self.ct == CompanyEntry.CT.COMMON:
        # The "Par Value" is ignored. Also fixed_dividend is ignored?
        return self.last_dividend / market_price
    elif self.ct == CompanyEntry.CT.PREFERRED:
        # is 'last_dividend' ignored?
        ...
[ "def calc_dividend_yield(self):\n if self._price is None:\n return 0\n if self._type is Type.common:\n return self._last_dividend / self._price\n return (self._fixed_dividend * self._par_value) / self._price", "def dividend_yield(self):\r\n return self.dividend / ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a string with hex bytes to a numeric value
def hex_str_to_int(input_str):
    try:
        val_to_return = int(input_str, 16)
    except Exception as e:
        val_to_return = 0
        print(e)
    return val_to_return
[ "def eval_hex_string_to_number(string_number :str) -> Optional[int]:\n is_hex = re.compile(HEX_REGEX_MATCH)\n is_there_match = is_hex.search(string_number)\n if is_there_match:\n return int(is_there_match.group(\"hex\"),16)\n return None", "def hex_to_denary(hex_str: str) -> int:\r\n hex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Join 'xs' with semicolons.
def scjoin(xs):
    return ';'.join(str(x) for x in xs)
[ "def cjoin(xs):\n return ','.join(str(x) for x in xs)", "def _safe_join(sep, elems):\n return sep.join(elem.replace(sep, \"\") for elem in elems)", "def scprint(xs):\n print(scjoin(xs), end='', flush=True)", "def WrappedJoin(items, separator=..., width=...):\n ...", "def q_join(lst, sep=','):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print 'xs', joined by semicolons, on a single line. CMake friendly.
def scprint(xs):
    print(scjoin(xs), end='', flush=True)
[ "def scjoin(xs):\n return ';'.join(str(x) for x in xs)", "def xprint(buf):\n if DAEMONIZE:\n return\n\n print buf", "def print_plus_sequence(ncol):\n for _ in range(ncol):\n print_plus()\n ncall(print_dash, CELL_LENGTH)\n print_plus()\n print_end()", "def print_each(xs, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Join 'xs' with commas.
def cjoin(xs):
    return ','.join(str(x) for x in xs)
[ "def scjoin(xs):\n return ';'.join(str(x) for x in xs)", "def join(arr):\n return \",\".join(str(stringify_weird(x)) for x in arr)", "def q_join(lst, sep=','):\r\n return sep.join(dquote(itm) for itm in lst)", "def papa_join(*fields):\n strings = []\n for field in fields:\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge kernel lists without duplicated meta.length; ignore later ones.
def unique(kernels):
    r, s = list(), set()
    for kernel in kernels:
        if isinstance(kernel.length, list):
            key = tuple(kernel.length) + (kernel.scheme,)
        else:
            key = (kernel.length, kernel.scheme)
        if key not in s:
            s.add(key)
            r.append(kernel)
    r...
[ "def _merge_meta_device(meta_devices, meta_device_mapping, scenario, original_to_merged_mapping):\n device_mapping = meta_device_mapping.copy()\n merged_endpoints = set(device_mapping.keys()).copy()\n\n if not merged_endpoints:\n merged_endpoint = meta_devices.pop()\n merged_endpoints.add(mer...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the function that populates the kernel function pool.
def generate_cpu_function_pool(functions):
    function_map = Map('function_map')
    precisions = {'sp': 'rocfft_precision_single',
                  'dp': 'rocfft_precision_double'}
    populate = StatementList()
    for f in functions:
        length, precision, scheme, transpose = f.meta.length, f.meta.precisi...
[ "def build_kernel(self):\n ...", "def _compile_kernels(self) -> None:\n ...", "def gen_custom_ops_registration(\n *,\n native_functions: Sequence[NativeFunction],\n selector: SelectiveBuilder,\n kernel_index: ETKernelIndex,\n rocm: bool,\n) -> Tuple[str, str]:\n\n # convert kerne...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of kernel filenames.
def list_generated_kernels(kernels):
    return [kernel_file_name(x) for x in kernels if not x.runtime_compile]
[ "def get_kernel_registration_files(ort_root=None, include_cuda=False):\n\n if not ort_root:\n ort_root = os.path.dirname(os.path.abspath(__file__)) + \"/../..\"\n\n provider_path = ort_root + \"/onnxruntime/core/providers/{ep}/{ep}_execution_provider.cc\"\n contrib_provider_path = ort_root + \"/onnx...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given kernel info namespace, return reasonable file name.
def kernel_file_name(ns):
    assert hasattr(ns, 'length')
    length = ns.length
    if isinstance(length, (tuple, list)):
        length = 'x'.join(str(x) for x in length)
    postfix = ''
    if ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CC':
        postfix = '_sbcc'
    elif ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_R...
[ "def kernelFilenameIO(fn, val=''):\r\n fn = glob.glob(fn)[0]\r\n return _kernelFileIO(fn, val)", "def _guess_corenlp_name(k):\r\n bname = os.path.basename(k.doc)\r\n if bname.startswith('file'):\r\n return None\r\n\r\n corenlp_out_file = bname + '.xml'\r\n return corenlp_out_file", "def def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of small kernels to generate.
def list_small_kernels():
    kernels1d = [
        NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)),
        NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)),
        NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,)),
        ...
[ "def list_large_kernels():\n\n sbcc_kernels = [\n NS(length=50, factors=[10, 5], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}, threads_per_block=256),\n NS(length=52, factors=[13, 4], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}),\n NS(length...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of fused 2D kernels to generate.
def list_2d_kernels():
    fused_kernels = [
        NS(length=[4,4], factors=[[2,2],[2,2]], threads_per_transform=[2,2], threads_per_block=8),
        NS(length=[4,8], factors=[[2,2],[4,2]], threads_per_transform=[2,2], threads_per_block=16),
        NS(length=[4,9], factors=[[2,2],[3,3]], threads_per_transform=[2,3]...
[ "def get_kernels():\n return ['linear', 'rbf']", "def _get_fused_kernels_supported_devices() -> List[str]:\n return [\"cuda\", \"xpu\", torch._C._get_privateuse1_backend_name()]", "def generatingKernel():\n\n # https://www.quora.com/What-is-the-difference-between-edge-detection-Sobel-detection-and-...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of large kernels to generate.
def list_large_kernels():
    sbcc_kernels = [
        NS(length=50, factors=[10, 5], use_3steps_large_twd={
            'sp': 'true', 'dp': 'true'}, threads_per_block=256),
        NS(length=52, factors=[13, 4], use_3steps_large_twd={
            'sp': 'true', 'dp': 'true'}),
        NS(length=60, factor...
[ "def list_small_kernels():\n\n kernels1d = [\n NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)),\n NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)),\n NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a single kernel file for 'kernel'. The kernel file contains all kernel variations corresponding to the kernel meta data in 'kernel'. A list of CPU functions is returned.
def generate_kernel(kernel, precisions, stockham_aot):
    args = [stockham_aot]
    # 2D single kernels always specify threads per transform
    if isinstance(kernel.length, list):
        args.append(','.join([str(f) for f in kernel.factors[0]]))
        args.append(','.join([str(f) for f in kernel.factors[1]]))
        ...
[ "def createKernelSpec():\n tmpd = tempfile.mkdtemp(suffix=\"_nbdiff_ipythondir\")\n kernelsPath = os.path.join(tmpd, \"kernels\")\n os.mkdir(kernelsPath)\n rootKernelPath = os.path.join(kernelsPath, \"root\")\n os.mkdir(rootKernelPath)\n kernel_file = open(os.path.join(rootKernelPath, \"kernel.jso...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate and write kernels from the kernel list. Entries in the kernel list are simple namespaces. These are passed as keyword arguments to the Stockham generator. A list of CPU functions is returned.
def generate_kernels(kernels, precisions, stockham_aot):
    import threading
    import queue
    # push all the work to a queue
    q_in = queue.Queue()
    for k in kernels:
        q_in.put(k)
    # queue for outputs
    q_out = queue.Queue()

    def threadfunc():
        nonlocal q_in
        nonlocal q_out
        ...
[ "def generate_kernel(kernel, precisions, stockham_aot):\n\n args = [stockham_aot]\n # 2D single kernels always specify threads per transform\n if isinstance(kernel.length, list):\n args.append(','.join([str(f) for f in kernel.factors[0]]))\n args.append(','.join([str(f) for f in kernel.factor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawl daily stock data from TWSE
def crawler(date_time):
    def _str_to_float(x):
        """The raw price data is a string object; we need to convert it to float."""
        try:
            x = pd.to_numeric(x)
            return x
        except ValueError:
            return -1

    page_url = 'http://www.twse.com.tw/exchangeReport/MI_INDEX?resp...
[ "def pull_all_symbols(self):\n self._enforce_crawl_delay()\n dl_url: str = 'https://api.nasdaq.com/api/screener/stocks?tableonly=true&limit=25&offset=0&download=true'\n req = requests.get(dl_url, headers=self.request_headers)\n self.last_crawl = datetime.datetime.now()\n if req.st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read flob to get data in phylip format. The user would generally not need to call this method directly. It is called by read() etc.
def readOpenPhylipFile(self, flob, nTax, nChar):
    gm = ['Alignment.readOpenPhylipFile()']
    dbug = False
    if dbug:
        print("\nreadOpenPhylipFile here")
        if hasattr(flob, 'name'):
            print("    fileName is %s" % flob.name)
        print("    nTax is", nTax)
        ...
[ "def loaddata(self, f):\n\n if isinstance(f, str):\n try:\n fin = open(f)\n except IOError as e:\n raise LUTError(\"IO error on \" + f + \" - \", e.args[1])\n try:\n return self._loaddata(fin)\n except LUTError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write self in Nexus format. If writeDataBlock=1, then a data block is written, rather than the default, which is to write a taxa and a characters block. Flat gives sequences all on one line. Append, if 0, writes NEXUS first. If 1, does not write NEXUS. userText is anything, eg a comment or another Nexus block, that you...
def writeNexus(self, fName=None, writeDataBlock=0, interleave=0, flat=0, append=0, userText=''):
    gm = ["Alignment.writeNexus()"]
    if not fName:
        fName = sys.stdout
    if fName == sys.stdout:
        f = sys.stdout
        if append:
            pass
        else:
            ...
[ "def write_block(self, address, data):\n self.transfer(0x01, address, data, 0)", "def write(self, block, data):\n log.debug(\"write block #{0}\".format(block))\n assert(len(data) == 4)\n assert(block > 3)\n if not self._page == block / 256:\n self._page = block / 256\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the alignment in Phylip format. If interleave is turned off, then sequences are written sequentially. Phylip and phyliplike formats are too varied. The strict Phylip format has a set number of spaces for the taxon name, and there may not necessarily be a space between the name and the sequence. The name size is c...
def writePhylip(self, fName=None, interleave=False, whitespaceSeparatesNames=True, flat=False, append=True, offset=1):
    gm = ['Alignment.writePhylip(fName=%s, interleave=%s, whitespaceSeparatesNames=%s, flat=%s, append=%s)' % (
        fName, interleave, whitespaceSeparatesNames, flat, append)]
    # F...
[ "def write_alignment(self, alignment, id_width=_PHYLIP_ID_WIDTH):\n handle = self.handle\n\n if len(alignment) == 0:\n raise ValueError(\"Must have at least one sequence\")\n length_of_seqs = alignment.get_alignment_length()\n for record in alignment:\n if length_of...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests FileGenerator.create_files(). Tests FileGenerator's ability to properly create files in GCS by running extract jobs on staging tables.
def test_create_files(self, project_id):
    if not project_id:
        raise Exception(
            'Test needs project_id to pass. '
            'Add --project_id={your project ID} to test command')
    # create sample staging table
    staging_table_id = '50_STRING_50_NUMERIC_10_213B'
    ...
[ "def test_repo_create_file(self):\n pass", "def setUp(self):\n self.setUpPyfakefs()\n\n self.fs.CreateFile('/empty_file', contents='')\n\n self._test_contents = 'Hello, World! This is a test file.'\n self._file_size = len(self._test_contents)\n self.fs.CreateFile('/hello_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests FileGenerator._compose_sharded_blobs(). Tests FileGenerator's ability to properly compose multiple sharded blobs into one blob.
def test_compose_sharded_blobs(self, project_id):
    if not project_id:
        raise Exception(
            'Test needs project_id to pass. '
            'Add --project_id={your project ID} to test command')
    self.file_generator = load_file_generator.FileGenerator(
        project_id, self....
[ "def split_images():\n home_dir = get_directory()\n\n count = 0\n for f_name in glob(home_dir + \"/data/raw/facades/**/*.jpg\", recursive=True):\n\n # load image and find bounds\n tmp_img = Image.open(f_name)\n width, height = tmp_img.size\n middle = int(math.ceil(width / 2))\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the beacons pushed by this parent.
def beacons(self, request, pk=None): parent = self.queryset.get(id=pk) return parent.beacons.all()
[ "def fetchNewBeacons():\n try:\n # http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.list_objects\n resp = s3.list_objects(Bucket=bucketName)\n objects = resp['Contents']\n beacons = []\n # beacons = [obj.split(':')[1] for obj in objects if 'AGENT:' in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The value is obtained by first computing the expected value from the discrete support. Second, the inverse transform is then applied (the square function).
def _value_transform(self, value_support): value = self._softmax(value_support) value = np.dot(value, range(-self.value_support_size, self.value_support_size + 1)) value = np.sign(value) * ( ((np.sqrt(1 + 4 * 0.001 * (np.abs(value) + 1 + 0.001)) - 1) / (2 * 0.001)) ** 2 - 1 )...
[ "def _value_transform(self, value_support: np.array) -> float:\n value = self._softmax(value_support)\n value = np.dot(value, range(self.value_support_size))\n return np.asscalar(value)", "def SquareValue(v):\r\n return v * v", "def safeInverse(self, value):\n if value < 1e-20 and val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts x value to gene representation.
def _to_genes(self, x, scope): x = scope.index(x) x = self._converter.convert(x, self._digits) return x
[ "def binary_gene():\n return Gene([0, 1])", "def x(self, v):\n return self._ring_coordinates_gens['x'+str(v)]", "def transform_x(self, x):\n raise NotImplementedError()", "def string_gene():\n return Gene([\"a\", \"b\", \"c\"])", "def flyGene(self, gene):", "def _get_gene_value_object(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update IPAM Subnet. Updates allocation pools, DNS zones, or EAs for the subnet in the Infoblox backend.
def update_subnet(self, rollback_list, subnet_request): neutron_subnet = self._build_subnet_from_request(subnet_request) ib_network = self._get_ib_network(neutron_subnet['id'], neutron_subnet['ip_version']) if not ib_network: raise exc.Infobl...
[ "def update_subnet(self, request):", "def update_subnet_pool(self, subnet_pool, **attrs):\n return self._update(_subnet_pool.SubnetPool, subnet_pool, **attrs)", "def subnet_put_api(subnetid=None):\r\n try:\r\n if not subnetid:\r\n return err_return('subnetId is required', \"Parameter...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allocate an IP address based on the request passed in.
def allocate(self, address_request): # Validate if network is available for which port # association request came. # This handle case where subnet is in process of deletion and # port allocation comes for update_port. if not self._ib_network: raise Exception("IB Netwo...
[ "def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec):\n admin_context = context.elevated()\n network = db.network_get_by_uuid(admin_context, quantum_net_id)\n address = None\n if network['cidr']:\n address = db.fixed_ip_associate_pool(admin_context,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function builds a dictionary whose keys are the labels of the display formulas in the document and whose values are the numbers that LaTeX assigns, taken from the newlabel command in the .aux document that LaTeX generates when compiling.
def generateLabelsDict(auxiliarDocumentString, labelsList): labelsDict={} newLabelsDict=seekLabels(auxiliarDocumentString) for label in labelsList: if label in newLabelsDict.keys(): labelsDict[label]=newLabelsDict[label] else: labelsDict[label]=label return labelsDict
[ "def _update_latex_labels(self):\n _parameters = [\n list(value.keys()) for value in self.values()\n ]\n _parameters = [item for sublist in _parameters for item in sublist]\n self._latex_labels = {\n param: latex_labels[param] if param in latex_labels.keys() else\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the resource and return the response and index_id
def create(self, wait: bool = True, **kwargs) -> Tuple[Dict, Optional[str]]: combined_kwargs = {self.name_key: self.name} combined_kwargs.update(self.processed_config) combined_kwargs.update(kwargs) client_method = getattr(self.service_client, "create_{}".format(self.sdk_name)) ...
[ "def create(event, _):\n logger.info(\"Got Create\")\n\n if 'ResourceProperties' not in event:\n raise ValueError(\"Please provide resource properties\")\n required_properties = ['IndexName',\n 'Edition',\n 'IndexRoleArn',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ID using self.ztid. Requires that self.ztid is set and that it is unique. Should only be called during `__init__` to set `self.index_id`. If it returns a string, it means this resource exists.
def _get_index_id_from_ztid(self) -> Optional[str]: for res in self.list_with_tags(self.session, self.region_name): # type: AWSResource if res.ztid == self.ztid: return res.index_id
[ "def _get_index_id_from_name(self) -> Optional[str]:\n pass", "def _id_from_name(resource_config, resources, typename):\n return obj_from_name(resource_config, resources, typename).id", "def get_id(self, name):\n try:\n return self.d[name.replace(' ', '_')]\n except KeyError:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ID using self.name. Requires that self.name is set and that it is unique. Should only be called during `__init__` to set `self.index_id`. If it returns a string, it means this resource exists. Will not be called if `self.index_id_key == self.name_key`. Just raise NotImplementedError in that case.
def _get_index_id_from_name(self) -> Optional[str]: pass
[ "def _id_from_name(resource_config, resources, typename):\n return obj_from_name(resource_config, resources, typename).id", "def get_id(self, name):\n try:\n return self.d[name.replace(' ', '_')]\n except KeyError:\n return None", "def object_id(self, name: str) -> plasma....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provide Manager and ztid tags
def standard_tags(res: AWSResource) -> Mapping: return {manager_tag_key: res.manager, 'ztid': str(res.ztid or uuid.uuid4())}
[ "def patch_tags(\n self,\n resource_group_name: str,\n network_manager_name: str,\n parameters: IO,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.NetworkManager:", "def post_register(self, manager):\n pass", "def ex_cre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all stoppage events from unparsed game data.
def get_all_stoppage(game_data): all_plays = game_data['liveData']['plays']['allPlays'] # Will return [] if none stoppage = [d for d in all_plays if d['result']['event'] == 'Stoppage'] ...
[ "def _test_stream_stop(self):\n return [\n WatermarkEvent(\n timestamp.MAX_TIMESTAMP, _TestStream.WATERMARK_CONTROL_TAG)\n ]", "def get_events(data):\n\n return data[\"events\"]", "def on_stop(self):\n # Inform the public\n log = self._parent.logger.entry()\n log....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Figures out what the time and period should be. Uses parsed game data from get_parsed_game_data.
def get_game_time_and_period(game_data): info = {'time': '', 'period': ''} # Game has not started if game_data['abstractGameState'] == 'Preview': info['period'] = "PRE" info['time'] = '00:00' # Game over elif game_data['abstractGameState'] == 'Final': info['period'] = "END"...
[ "def user_game_analysis(self):\n # go throught all data and record two dictionaries\n if self.played_required:\n print(\"games with playtime 0 is neglected\\n\")\n game_stat = {}\n user_stat = {}\n useful_user_num = 0\n for id in self.user_game_data:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to calculate the intersection over union of tokens in headline and body. Tokens for headline are extracted based on the text between single or double quotes.
def word_overlap_quotes_features(headlines, bodies): X = [] for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))): clean_headline = clean(headline) clean_body = clean(body) clean_headline = get_tokenized_quotes(clean_headline) clean_body = get_tokenized_lemmas(clean_...
[ "def body_words_in_headline(self,doc):\n features = defaultdict(int)\n analyze = self.build_analyzer()\n headline_tokens=analyze(doc[0])\n body_tokens=analyze(doc[1])\n #headline_token_counts=defaultdict(int)\n body_token_counts=defaultdict(int)\n for token in body_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to calculate cosine similarity between the tfidf vectors for headline and body. The vocab size for the TFIDF Vectorizer is calculated by taking the 5000 most occurring words in headline and body. It returns a single feature which is the cosine similarity value for a headline and body vector.
def word_tfidf_features(headlines, bodies): total_vocab = [get_tokenized_pos(clean(line)) for line in tqdm(headlines+bodies)] print ("\n\n total vocab size - \n") print(len(total_vocab)) total_vocab_flatten = [word for subword in total_vocab for word in subword] word_counter = Counter(total_vocab_f...
[ "def compute_similarity():\n movie_data = pd.read_csv(\"movie_recsys/datasets/movie_data.csv\")\n\n # Compute TF-IDF representation.\n tfidf = TfidfVectorizer(stop_words=\"english\")\n tfidf_matrix = tfidf.fit_transform(movie_data[\"story\"])\n\n # Compute Cosine Similarity.\n cosine_sim_scores = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to calculate cosine similarity between the tfidf vectors for headline and body. The method splits the body into 4 parts containing equal numbers of sentences. The vocab size for the TFIDF Vectorizer is calculated by taking the 5000 most occurring words in headline and body. It returns a single feature which is the max cosine si...
def word_tfidf_pos_ss_features(headlines, bodies): total_vocab = [get_tokenized_pos(clean(line)) for line in tqdm(headlines+bodies)] total_vocab_flatten = [word for subword in total_vocab for word in subword] word_counter = Counter(total_vocab_flatten) most_occur = word_counter.most_common(5000) vo...
[ "def word_tfidf_features(headlines, bodies):\n total_vocab = [get_tokenized_pos(clean(line)) for line in tqdm(headlines+bodies)]\n print (\"\\n\\n total vocab size - \\n\")\n print(len(total_vocab))\n\n total_vocab_flatten = [word for subword in total_vocab for word in subword]\n word_counter = Count...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform mode setup and defaults for status pins
def __setup_status_pins(self): self.pi.set_mode(self.RUNNING_LED_PIN, pigpio.OUTPUT) self.pi.set_mode(self.FLASH_STATUS_PIN, pigpio.OUTPUT) self.pi.set_mode(self.CLEAR_MODE_STATUS_PIN, pigpio.OUTPUT) self.pi.set_mode(self.DECK_EMPTY_STATUS_PIN, pigpio.OUTPUT) self.pi.set_mode(sel...
[ "def choose_mode( self, ):\r\n # =========== add your modes as desired starting here ========\r\n # ---------->> call modes here; I comment out ones I am not using. Makes it really easy to switch modes\r\n # these are modes I use, pretty much one for each micro-controller\r\n # proje...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Blink the FLASH_STATUS_PIN until a timeout or wait_function returns true. Blinks over BLINK_INTERVAL and constantly checks timeout if it is not None. If timeout is None, then blink_until will continue blinking until wait_function returns true. When the function exits, the FLASH_STATUS_PIN is brought LOW.
def blink_until(self, wait_function, timeout=None): timer = time.time() next_end_cycle = timer + self.BLINK_INTERVAL blink_status = True while not wait_function(): # check if there was a timeout if (timeout is not None and (time.time() - timer > timeout)): ...
[ "def Blink(t):\n\tGPIO.output(24,True) #Turn LED on\n\ttime.sleep(t) # Wait t seconds\n\tGPIO.output(24,False) # Turn LED off", "def whileFlashBusy(self):\n wait_time = 0\n\n self.flash_hw_qspi_cs_enable()\n self.flash_hw_qspi_write8(HW_QSPI_COMMON_CMD.READ_STATUS_REGISTER)\n while Tru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }