Dataset columns: query (string, 9 to 9.05k chars), document (string, 10 to 222k chars), negatives (list of 19 to 20 items), metadata (dict).
Test the _get_application_process utility.
def test_get_application_process(self): from supvisors.rpcinterface import RPCInterface # prepare context self.supervisor.supvisors.context.applications = { 'appli_1': 'first application'} self.supervisor.supvisors.context.processes = { 'appli_1:proc_1': 'first pr...
[ "def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known app...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Item images must be posted into the album whose id is passed in as aid. The Renren backend does not currently support image upload. Returns a tuple of (status, content, message).
def create_photo_post(self, merchandise, aid, message=None, **kwargs): TAG = 'renren create photopost' logger.info(u"%s:%s" % (TAG, u'%r 发布照片中...' % self)) from dashboard.listing.models import Item, Fund # initiate status message if isinstance(merchandise, Item): tex...
[ "def postimg(self,imgurl):\n if self.is_downloadable(imgurl) == True: \n pass\n else:\n return None\n\n \"\"\"\n Download the image from URL and put it in Downloads\n \"\"\"\n try:\n urllib.request.urlretrieve(imgurl,'%s/downl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the user's album list. All data is returned as a list; on error an empty list is returned. Returns (status, content, message).
def get_album_list(self, **kwargs): TAG = u'renren get album list' SUCCESS_MSG = u'人人获取相册列表成功' ERROR_MSG = u'人人获取相册列表失败' logger.info(u"%s:%s" % (TAG, u'%r 获取相册列表中...' % (self))) path = '/v2/album/list' method = 'GET' try: r = self._renrenAPI(pat...
[ "def list(self):\n\n base_url = ''.join((\n self.BASE_URL + '/users/',\n self.__user_data.get('login') + '/gists',\n ))\n\n response = requests.get(base_url, headers=self.__headers)\n\n if response.status_code == 200:\n return response.json()\n\n raise GistException(Gist.__get_response...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple checking function for whether a URL exists or not, for an API from which we expect a 200 OK or a 404.
def url_was_found(url="localhost:5000/health"): res = requests.get(url).json() if res['status_code'] == 200: return True elif res['status_code'] == 404: return False else: raise UnexpectedResponseError("Expected 200 OK or 404, got {}.\n".format(res['status']), "Full response : {...
[ "def url_exists(url):\n request = requests.get(url)\n if request.status_code == 200:\n exist = True\n else:\n exist = False\n return exist", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to extract a list of genes and write to file
def get_genes(infile,outfile): gene_list = [] with open(infile) as gene: tag = False for line in gene: if line.startswith('name'): tag = True continue if tag: items = line.split() if len(items) > 0: ...
[ "def get_genes(baits_file, outFile):\n\twith open(outFile, 'w') as f:\n\t\trecords = list(SeqIO.parse(baits_file, 'fasta'))\n\t\tfor record in records:\n\t\t\tf.write(record.id[11:]+'\\n')", "def saveTmdbGenres():\n \n listGenres = tmdb.Genres().list()[\"genres\"]\n \n genres = { _format(g[\"name\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a square. The square is added to `self.opened`. If you survive, the number of mines around xy is published in `self.mines_near[xy]`. If you die, the square is also added to `self.flagged`, and `self.mines_near[xy]` is set to 'mine' instead of a number.
def open(self, xy): if xy in self.opened: return self.opened.add(xy) if xy in self._mines: self.mines_near[xy] = 'mine' self.flag(xy) # simplifies playing after death logic self.lose() else: self.mines_near[xy] = len(s...
[ "def open(self, xy):\n if xy in self.opened: # if the cell id already opened, do nothing\n return\n\n self.opened.add(xy) # add to the list of opened cells\n if xy in self._mines: # if mine, update status to M\n self.mines_busted.add(xy) # add cell xy to the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Paste knob_value into the new node.
def paste_val(node, knob_name, knob_value): if node.knob(knob_name) is not None: node.knob(knob_name).setValue(knob_value)
[ "def read_knob_val(node, knob_name):\n\n try:\n return node[knob_name]\n except KeyError:\n return \"\"\n except NameError:\n return \"\"", "def addFactor(self, knobFactor: cern.lsa.domain.settings.KnobFactor) -> _AbstractKnobBuilder__T:\n ...", "def loopnodes(knobs={}):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read data from the given node, create and fill a ConvertNode object, and return it.
def create_convert_node(node): try: file = read_knob_val(node, "file").getValue() first = int(read_knob_val(node, "first").getValue()) last = int(read_knob_val(node, "last").getValue()) first2 = int(read_knob_val(node, "origfirst").getValue()) last2 = int(read_knob_val(node,...
[ "def convert(self, node):\n # get the conversion lut\n node_type = self.get_node_type(node)\n conversion_specs = self.conversion_spec_sheet.get(node_type)\n if not conversion_specs:\n print('No conversion_specs for: %s' % node_type)\n return\n\n # call any ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to read the knob value; if it doesn't exist, return an empty string.
def read_knob_val(node, knob_name): try: return node[knob_name] except KeyError: return "" except NameError: return ""
[ "def _GUI_Read(s, k):\r\n return s._attr[k]", "def _get_control(self, key):\n \n data = self.client.session.get_device_config(\n self.device.id,\n key,\n 'Control',\n )\n\n # The response comes in a funky key/value format: \"(key:va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
proxy(object[, callback]) create a proxy object that weakly references 'object'. 'callback', if given, is called with a reference to the proxy when 'object' is about to be finalized.
def weakref_proxy(*args, **kwargs): pass
[ "def _proxify(func) -> weakref.ProxyType:\r\n _keep_alive.append(func)\r\n return weakref.proxy(func)", "def fl_set_object_callback(ptr_flobject, pyfn_CallbackPtr, numdata):\n #FL_CALLBACKPTR = cty.CFUNCTYPE(None, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_long)\n _fl_se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Need to pass a list of Card objects in. The deck should also know what game it's for. That information belongs with the deck since a deck will be made up of cards from just one game. Also, a deck for a given game should be able to tell you whether it's a legal deck for a given format in that game; however, this is for s...
def __init__(self, decklist): self.decklist = decklist # Since Card implements __str__(), `print self.decklist` is a # good-enough string representation of the deck. Note that decks # should assert that count > 0 for all cards in the full deck. What # kind of validation do we wa...
[ "def __init__(self):\n \n self.deck = [Card(suit,rank) for suit in SUITS for rank in RANKS]", "def get_cards(self, cards:list):\n for i in cards:\n self.cards.append(i)", "def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A medium-difficulty question removes 10-20 random cards from your deck, then asks which of four options the top card of your deck is most likely to be. Should be useful to most games, and it's easy to implement in a game-agnostic way, so it's here instead of in game-specific code.
def most_likely_top_card(self, deck): question_string = "If {}have been removed from your deck, which of the following cards is most likely to be the top card of your deck?" answer_suffix = "is most likely to be the top card" reduced_deck = copy.deepcopy(deck) cards_to_remove = random.c...
[ "def choose_hand(hand, deck):\n possible = list()\n for c in combinations(hand, 4):\n possible.append([Cribbage.expected_score(list(c), deck), c])\n best = max(possible, key = lambda i : i[0])\n discard = list(set(hand) - set(best[1]))\n return best[1], discard", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a scoped session (according to SQLAlchemy docs, this always returns the same object within a thread, and a different object in a different thread; moreover, since we update scopedsessionclass upon forking, forks also have different session objects).
def get_scoped_session(): if scopedsessionclass is None: s = None else: s = scopedsessionclass() return s
[ "def get_session(self):\n if self.__initialized:\n # Get thread id\n thread_id = get_current_thread_id()\n\n # Check if we already have a session for this thread\n if thread_id in self.__sessions.keys():\n if self.__sessions[thread_id] is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> chequejaCaixa([[2,7,6,3,1,4,9,5,8],[8,5,4,9,6,2,7,1,3],[9,1,3,8,7,5,2,6,4],[4, 6, 8, 1, 2, 7, 3, 9, 5], [ 5, 9, 7, 4, 3, 8, 6, 2, 1], [1, 3, 2, 5, 9, 6, 4, 8, 7],[3, 2, 5, 7, 8, 9, 1, 4, 6], [6, 4, 1, 2, 5, 3, 8, 7, 9], [7, 8, 9, 6, 4, 1, 5, 3, 2]]) True >>> chequejaCaixa([[2,7,5,3,1,4,9,6,8],[8,5,4,9,6,2,7,1,3],[9...
def chequejaCaixa(m): for caixaX in range(3): for caixaY in range(3): #Per una caixa numsUtilitzats ="" for i in range (caixaX*3, caixaX*3 + 3): for j in range(caixaY*3, caixaY*3 + 3): if(m[i][j] < 1 or m[i][j] > 9): ...
[ "def eleccion_casilla(cupos_piso, matriz_piso):\n for fila in range(len(matriz_piso)):\n for columna in range(len(matriz_piso[fila])):\n if not([fila, columna] in cupos_piso): \n # Si no esta en las opciones, se marca como X. Pero si si esta en las opciones no hay que hacer nada por...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether the dealer has a card that qualifies for discard or not.
def dealer_matching(self): if len([card for card in self.dealer_hand if card[1] == '8']) > 0: self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0] self.dealer_hand.remove(self.discard_pile) dealer_suits = [card[0] for card in self.dealer_hand] ...
[ "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toStr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the passwords are equal.
def senhas_nao_iguais(senha, senha2): return senha != senha2
[ "def check(spisok):\n if len(set(map(tuple, spisok))) != len(spisok):\n return False\n else:\n return spisok", "def isSukun(archar):\n return archar==SUKUN;\n #return True;\n\t# else: return False;", "def accidentTest(ligne):\r\n for i in range(len(ligne)):\r\n if (l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the updating of calendar-free-busy-set xattrs on inboxes.
def test_freeBusyUpgrade(self): self.setUpInitialStates() directory = self.directory # # Verify these values require no updating: # # Uncompressed XML value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:calda...
[ "def test_update_calendar(self):\n pass", "def test_update_mailbox(self):\n pass", "def test_update_booking_tag(self):\n pass", "def test_update_function_booking_catering_item(self):\n pass", "def test_update_function_booking_attendee(self):\n pass", "def test_update_boo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the hierarchy described by "before", when upgraded, matches the hierarchy described by "after".
def verifyDirectoryComparison(self, before, after, reverify=False): root = self.createHierarchy(before) config.DocumentRoot = root config.DataRoot = root (yield self.doUpgrade(config)) self.assertTrue(self.verifyHierarchy(root, after)) if reverify: # Ensure...
[ "def assertBalanceChange (self, before, changes):\n\n after = self.getBalances ()\n assert_equal (len (before), len (changes))\n assert_equal (after, [before[i] + changes[i] for i in range (len (before))])", "def test_backwards(self):\n l1, g1 = mk_graphs('#Aabc / Sabc Scb')\n l2, g2 = mk_g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The upgrade process should remove unused notification directories in users' calendar homes, as well as the XML files found therein.
def test_removeNotificationDirectories(self): before = { "calendars": { "users": { "wsanchez": { "calendar": { db_basename: { "@contents": "", }, ...
[ "def _upgradeUserDataDirFiles(self):\n from os.path import dirname, basename, join, exists\n\n koDirSvc = components.classes[\"@activestate.com/koDirs;1\"].getService()\n currUserDataDir = koDirSvc.userDataDir\n currHostUserDataDir = koDirSvc.userDataDir\n \n # These are th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that calendar homes in the /calendars/<type>/<shortname>/ form whose records don't exist are moved into dataroot/archived/.
def test_calendarsUpgradeWithOrphans(self): before = { "calendars": { "users": { "unknownuser": { }, }, "groups": { "unknowngro...
[ "def test_calendarsUpgradeWithDuplicateOrphans(self):\n\n before = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \"calendars\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that calendar homes in the /calendars/<type>/<shortname>/ form whose records don't exist are moved into dataroot/archived/.
def test_calendarsUpgradeWithDuplicateOrphans(self): before = { "archived": { "unknownuser": { }, "unknowngroup": { }, }, "calendars": { "u...
[ "def test_calendarsUpgradeWithOrphans(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unknown files, including .DS_Store files at any point in the hierarchy, as well as non-directory files in a user's calendar home, will be ignored and not interrupt an upgrade.
def test_calendarsUpgradeWithUnknownFiles(self): ignoredUIDContents = { "64": { "23": { "6423F94A-6B76-4A3A-815B-D52CFD77935D": { "calendar": { db_basename: { "@contents": "", ...
[ "def CheckForUnknownFiles(self):\n unknown_files = self.GetUnknownFiles()\n if unknown_files:\n print \"The following files are not added to version control:\"\n for line in unknown_files:\n print line\n prompt = \"Are you sure to continue?(y/N) \"\n answer = raw_input(prompt).strip...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that calendar homes in the /calendars/__uids__/<guid>/ form are upgraded to the /calendars/__uids__/XX/YY/<guid>/ form.
def test_calendarsUpgradeWithUIDs(self): before = { "calendars": { "__uids__": { "6423F94A-6B76-4A3A-815B-D52CFD77935D": { "calendar": { db...
[ "def test_calendarsUpgradeWithDuplicateOrphans(self):\n\n before = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \"calendars\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that calendar user addresses (CUAs) are cached so we can reduce the number of principal lookup calls during upgrade.
def test_normalizeCUAddrs(self): class StubRecord(object): def __init__(self, fullNames, uid, cuas): self.fullNames = fullNames self.uid = uid self.calendarUserAddresses = cuas def getCUType(self): return "INDIVIDUAL" ...
[ "async def create_timezone_cache(self):\n for user_id, timezone in await self.ex.sql.s_user.fetch_timezones():\n user = await self.ex.get_user(user_id)\n user.timezone = timezone", "def _populate_users_cache(self):\n if self.users_values_cache is not None:\n return\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify conversion of old resources.xml format to twext.who.xml format
def test_resourcesXML(self): fileName = self.mktemp() fp = FilePath(fileName) fp.setContent(oldResourcesFormat) upgradeResourcesXML(fp) self.assertEquals(fp.getContent(), newResourcesFormat)
[ "def test_Q13639(self):\n old = SeqIO.read(\"SwissProt/Q13639.txt\", \"swiss\")\n new = SeqIO.read(\"SwissProt/Q13639.xml\", \"uniprot-xml\")\n self.compare_txt_xml(old, new)", "def test_conversion_invalid_xml_xml(self):\n file_path = os.path.join('testfile_st.xml')\n filename =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Spams transactions with the same nonce, and ensures the server rejects all but one
async def test_transaction_nonce_lock(self): no_tests = 20 txs = [] tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10) dtx = decode_transaction(tx) txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY)) for i in range(11, 10 + no_tests): t...
[ "async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Spams transactions with the same nonce, and ensures the server rejects all but one
async def test_prevent_out_of_order_txs(self): tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10) dtx1 = decode_transaction(tx1) stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY) tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1...
[ "async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return controller instance that is based on the equipment role.
def get_controller(equipment, accessmethod, logfile=None): path = _CONTROLLERMAP[accessmethod] constructor = module.get_object(path) return constructor(equipment, logfile)
[ "def controller(self, controller):\n return self.controllers[controller.__name__]", "def get_energy_controller():\n\n ctrl = EnergyBasedController(energy_reference=153.244, lam=0.1)\n return lambda state: ctrl.get_action(state)", "def get_registered_controller(self, model):\n return self._re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the given data frame into a list of lists. When the `row` parameter is given with a value >= 0, only that row is extracted as a list from the data frame.
def as_list(df: pandas.DataFrame, row=-1) -> list: if df is None: return [] if row >= 0: rec = [] for col in range(0, 13): rec.append(df.iat[row, col]) return rec recs = [] for row in range(df.shape[0]): recs.append(as_list(df, row=row)) return recs
[ "def df2list_of_lists(data_frame):\n first_row = [\"\"] + list(data_frame.columns.values)\n row_headers = list(data_frame.index.values)\n data = data_frame.values\n list_of_lists = [first_row]\n for idx in xrange(len(row_headers)):\n list_of_lists.append([row_headers[idx]] + list(data[idx]))\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inherit the query here to add the woo instance field for group by.
def _query(self, with_clause='', fields={}, groupby='', from_clause=''): fields['woo_instance_id'] = ", s.woo_instance_id as woo_instance_id" groupby += ', s.woo_instance_id' return super(SaleReport, self)._query(with_clause, fields, groupby, from_clause)
[ "def __init__(self, connection):\n QueryMultiple.__init__(self, connection=connection, object_type=\"group\")", "def aggregate_query(self):\n raise NotImplementedError", "def group_by(self, *args):\n for name in args:\n assert name in self._fields or name in self._calculated_fiel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run clingo with the provided argument list and return the parsed JSON result.
def solve(*args): args = ['clingo','--outf=2']+list(args) print ' '.join(args) clingo = subprocess.Popen( ' '.join(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True ) out, err = clingo.communicate() if err: print err with open('dump...
[ "def solve(*args):\n \n clingo = subprocess.Popen(\n ' '.join(args[0]),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True\n )\n out, err = clingo.communicate()\n if err:\n print err\n \n return parse_json_result(out)", "def cc_json():\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like solve() but uses a random sign heuristic with a random seed.
def solve_randomly(*args): args = list(args[0]) + ["--sign-def=3","--seed="+str(random.randint(0,1<<30))] return solve(*args)
[ "def solve_randomly(*args):\n args = list(args) + [\"--sign-def=3\",\"--seed=\"+str(random.randint(0,1<<30))]\n return solve(*args)", "def solve_seed(seed: int):\n return solve(Position.deal(seed), GOAL)", "def solve(self):", "def initLocalBestChoice(self):\n random.seed()\n return", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the provided JSON text and extract a dict representing the predicates described in the first solver result.
def parse_json_result(out): result = json.loads(out) assert len(result['Call']) > 0 assert len(result['Call'][0]['Witnesses']) > 0 witness = result['Call'][0]['Witnesses'][0]['Value'] class identitydefaultdict(collections.defaultdict): def __missing__(self, key): ...
[ "def parse(text):\n return {\n \"one\": Strategy.ONE,\n \"topology\": Strategy.TOPOLOGY,\n \"all\": Strategy.ALL\n }[text]", "def parse_json_result(out):\n\n result = json.loads(out)\n \n assert len(result['Call']) > 0\n assert len(result['Call'][0]['Witnesses']) > 0\n \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a tuple of (constants, functions, properties). constants is a sorted list of (name, features) tuples, including all constants except for SCLEX_ constants, which are presumed not used by scripts. The SCI_ constants for functions are omitted, since they can be derived, but the SCI_ constants for properties are include...
def GetScriptableInterface(f): constants = [] # returned as a sorted list functions = {} # returned as a sorted list of items properties = {} # returned as a sorted list of items for name in f.order: features = f.features[name] if features["Category"] != "Deprecated": if features["FeatureType"] == "val": ...
[ "def setup_funcs(self, assembler):\n func_list = [functions.KSFailure(assembler, ksweight),\n functions.StructuralMass(assembler),\n functions.Compliance(assembler)]\n return func_list, FUNC_REFS", "def get_functions():\n\treturn [f for f in globals() if f.sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows the given View controller.
def showViewController(viewController): __PyContentViewController__.shared.setRootViewController(viewController)
[ "def show_view(self):\n self.view_controller.displayView(self.get_view())", "def show(self):\n self.view.show()", "def openController(self, name, parent):\n frame = ICS[name](parent)\n frame.Show()\n return frame", "def show_welcome_view(self):\n self.view.open_view_w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search a list of (header, value) tuples for a debug header. If found, return the index. Otherwise, return -1.
def find_debug_header(self, header_list): matchers = { "debug_uri": ("location", False), "debug_token": (self.debug_header.lower(), True) } for i, htup in enumerate(header_list): header, val = htup for attr in matchers: h, ret = ma...
[ "def header_index(arg, header):\n for i, ele in enumerate(header):\n if match(arg, ele):\n return i\n raise NameError('column not found')", "def find(self, header):\n header = header.lower()\n for idx, item in enumerate(self._headers):\n if item.lower() == header:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize object that handles counting of words
def __init__(self): self.word_count_dict = {} self.num_comments = 0 self.num_words = 0
[ "def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)", "def create_word_to_count(self):\n print(\"Creating the word to count dictionary...\")\n for abstract in self.abstracts:\n abstract = clean_text(abstract, remove_pu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate the class with data. This class gets called from a PptxTable.
def __init__(self, data): self.data = data self.columns = Columns(data) self.rows = Rows(data)
[ "def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())", "def __init__(self, data, order_by=None):\n self._data = data\n self._snapshot = None # will store output dataset (ordered...)\n self._rows = Rows(self)\n self._columns = Columns(self)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the column index to account for the headers and updates the data in self.data.
def set_column_headers(self, headers): if isinstance(self.columns.idx[0], int): self.data = [sorted(headers)] + self.data increment = [i + 1 for i in self.rows.idx] self.rows.idx = [0] + increment elif isinstance(self.columns.idx[0], str): datum = {} ...
[ "def UpdateColumns(self):\r\n data = self.data\r\n columns = data.getParam('columns',data.tankColumns[:])\r\n col_name = data.getParam('colNames',{})\r\n col_width = data.getParam('colWidths',{})\r\n col_align = data.getParam('colAligns',{})\r\n for index,column in enumerat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the origin's type specs. A TFXIO 'Y' may be a result of projection of another TFXIO 'X', in which case 'X' is the origin of 'Y', and this method returns what X.TensorAdapter().TypeSpecs() would return. May be equal to `self.TypeSpecs()`.
def OriginalTypeSpecs(self) -> Dict[str, tf.TypeSpec]: return self._original_type_specs
[ "def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._type_specs", "def type_spec(self):\n return self._type_spec", "def get_spec_type(self):\r\n return self._spec_type", "def origin_type(self):\r\n\r\n return self.metadata[\"origin\"]['type']", "def numpy_types(self) -> List[np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the TypeSpec for each tensor.
def TypeSpecs(self) -> Dict[str, tf.TypeSpec]: return self._type_specs
[ "def type_spec(self) -> tf.TypeSpec:\n raise NotImplementedError", "def _tensor_specs(method_name, unused_kwargs, constructor_kwargs):\n width = constructor_kwargs['config'].get('width', 320)\n height = constructor_kwargs['config'].get('height', 240)\n\n observation_spec = [\n tf.contrib.framew...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a batch of tensors translated from `record_batch`.
def ToBatchTensors( self, record_batch: pa.RecordBatch, produce_eager_tensors: Optional[bool] = None) -> Dict[str, Any]: tf_executing_eagerly = tf.executing_eagerly() if produce_eager_tensors and not tf_executing_eagerly: raise RuntimeError( "Eager Tensors were requested but e...
[ "def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Any:", "def convert_batch_ids_to_tensor(self, batch_ids, history_len):\n batch_lens = [len(ids) for ids in batch_ids]\n max_len = max(batch_lens)\n batch_size = len(batch_ids)//history_len\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializer. It can be assumed that CanHandle(arrow_schema, tensor_representation) would return true.
def __init__(self, arrow_schema: pa.Schema, tensor_representation: schema_pb2.TensorRepresentation):
[ "def __init__(self, schema: dict, **kwargs) -> None:\r\n self.case_check = case_check(settings.CASE)\r\n self.ignored_keys = set_ignored_keys(**kwargs)\r\n if read_type(schema) == 'object':\r\n logger.debug('root -> dict')\r\n self.test_dict(schema)\r\n elif read_ty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the TypeSpec of the converted Tensor or CompositeTensor.
def type_spec(self) -> tf.TypeSpec: raise NotImplementedError
[ "def type_of(self, t, dtype_hint=None):\n if tf.executing_eagerly():\n new_t = tf.convert_to_tensor(value=t, dtype=dtype_hint)\n else:\n with tf.Graph().as_default(): # Use a scratch graph.\n new_t = tf.convert_to_tensor(value=t, dtype=dtype_hint)\n dtype = new_t.dtype.base_dtype.as_numpy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the RecordBatch to Tensor or CompositeTensor. The result must be of the same (not only compatible) TypeSpec as self.type_spec.
def GetTensor(self, record_batch: pa.RecordBatch, produce_eager_tensors: bool) -> Any:
[ "def _convert_batch_type(self, batch: MultiAgentBatch) -> NestedDict[TensorType]:", "def ToBatchTensors(\n self,\n record_batch: pa.RecordBatch,\n produce_eager_tensors: Optional[bool] = None) -> Dict[str, Any]:\n\n tf_executing_eagerly = tf.executing_eagerly()\n if produce_eager_tensors and ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a ListArray to a dense tensor.
def _ListArrayToTensor( self, list_array: pa.Array, produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]: values = list_array.flatten() batch_size = len(list_array) expected_num_elements = batch_size * self._unbatched_flat_len if len(values) != expected_num_elements: raise Valu...
[ "def list_np_to_tensor(list_of_arrays):\n return torch.stack([array for array in list_of_arrays])", "def sparse_to_dense(self, tensor: tf.Tensor, output_shape: tf.TensorShape) -> tf.Tensor:\n return tf.scatter_nd(self.observations_index, tensor, output_shape)", "def dense_to_sparse(self, tensor: t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds type handlers according to TensorRepresentations.
def _BuildTypeHandlers( tensor_representations: Dict[str, schema_pb2.TensorRepresentation], arrow_schema: pa.Schema) -> List[Tuple[str, _TypeHandler]]: result = [] for tensor_name, rep in tensor_representations.items(): potential_handlers = _TYPE_HANDLER_MAP.get(rep.WhichOneof("kind")) if not potent...
[ "def _build_nodes_shapetype(self):\n # add shape information\n if self._init_net is None:\n return\n for init_op in self._init_net.op:\n for init_arg in init_op.arg:\n if init_arg.name == \"shape\":\n self.shapes[init_op.output[0]] = init_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enumerates nested types along a column_path. A nested type is either a list-like type or a struct type. It uses `column_path`[0] to first address a field in the schema, and enumerates its type. If that type is nested, it enumerates its child and continues recursively until the column_path reaches an end. The child of a ...
def _EnumerateTypesAlongPath(arrow_schema: pa.Schema, column_path: path.ColumnPath, stop_at_path_end: bool = False) -> pa.DataType: field_name = column_path.initial_step() column_path = column_path.suffix(1) arrow_field = arrow_schema.field(field_name) ...
[ "def column_mapper(col):\n prefix = suffix = None\n result_type = col[\"Type\"]\n has_children = False\n\n if result_type.startswith(ARRAYSTRUCT_PREFIX):\n result_type = ARRAYSTRUCT\n prefix = ARRAYSTRUCT_PREFIX\n suffix = ARRAYSTRUCT_SUFFIX\n has_children = True\n elif re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a function that converts a StringArray to BinaryArray.
def _GetConvertToBinaryFn( array_type: pa.DataType) -> Optional[Callable[[pa.Array], pa.Array]]: if pa.types.is_string(array_type): return lambda array: array.view(pa.binary()) if pa.types.is_large_string(array_type): return lambda array: array.view(pa.large_binary()) return None
[ "def create_bytes_array_from_binary_array(self):\n return data_manipulation.convert_from_binary_array(self.binary_array)", "def string_to_array(binary_strings):\n rows = len(binary_strings)\n cols = len(binary_strings[0])\n\n seq_array = np.zeros((rows, cols), dtype=int)\n\n for i, seq in enume...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and concatenates the map and reduce pools,
def create_pools(finalize, reduce_size=-1): # Create the reduce pool LOGGER.debug("Creating reduce pool") reduce_pool = RedPool(reduce_task) # Set attributes reduce_pool.on_done = finalize if reduce_size > 1: reduce_pool.group_size = reduce_size # Create the map pool LOGGER.deb...
[ "def all_reduce_worker(self, input, output):\n pass", "def create_pool_and_container(self):\n num_of_pool = self.params.get(\"num_of_pool\", \"/run/server/*/\", 3)\n container_per_pool = self.params.get(\n \"container_per_pool\", \"/run/server/*/\", 2)\n for _ in ran...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether we can parse a pipe.
def test_pipe(): parser = CmdParser([posandtwo, valprog]) out = parser.parse("posandtwo | valprog") assert isinstance(out[0], ProgramNode) assert out[0].program_desc == posandtwo assert isinstance(out[1], PipeNode) assert isinstance(out[2], ProgramNode) assert out[2].program_desc == valprog ...
[ "def _is_pipe(expr):\n return isinstance(expr, _CallNode) and len(expr.args) == 1", "def test_pipe2():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog | posandtwo\")\n assert isinstance(out[0], ProgramNode)\n assert isinstance(out[1], PipeNode)\n assert isin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether we can parse several pipes.
def test_pipe2(): parser = CmdParser([posandtwo, valprog]) out = parser.parse("posandtwo | valprog | posandtwo") assert isinstance(out[0], ProgramNode) assert isinstance(out[1], PipeNode) assert isinstance(out[2], ProgramNode) assert isinstance(out[3], PipeNode) assert isinstance(out[4], Pro...
[ "def test_pipe():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog\")\n assert isinstance(out[0], ProgramNode)\n assert out[0].program_desc == posandtwo\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert out[2].program_de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A test to see if it can handle findlike's single-dash long args.
def test_findlike(): parser = CmdParser([findlike]) out = parser.parse("findlike . -name foo") assert out[0].arguments[0].present == True assert out[0].arguments[0].value == "foo" assert out[0].arguments[1].present == True assert out[0].arguments[1].value == "." assert out[0].as_shell_string...
[ "def test_arg_option_long_only(self):\n optional_long = [\n arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith(\"-\")\n ]\n for arg in optional_long:\n assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f\"{arg.flags[0]} is not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Don't expect duplicate flags unless told.
def test_duplicate_flags(): parser = CmdParser([noArgs, onearg]) with pytest.raises(CmdParseError): out = parser.parse("onearg -a -a")
[ "def SynchronizeFlags(self):\n pass", "def test_checkFlags(self):\n self.failUnlessEqual(self.nice.opts['aflag'], 1)\n self.failUnlessEqual(self.nice.opts['flout'], 0)", "def resetFlags():\r\n for flag in flags:\r\n flags[flag] = False", "def editflag(x,y,flags):\n count = fl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop the execution and print out the captured error message.
def stop_err(msg): sys.stderr.write('%s\n' % msg) sys.exit(-1)
[ "def stop(self):\n sys.stderr = self.original", "def _stop(self):\n self.display_end_message()", "def stop_err(msg, error_level=1):\n sys.stderr.write(\"%s\\n\" % msg)\n sys.exit(error_level)", "def error(message):\n print(message)\n exit(-1)", "def finalize_error():\n print('')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of iteration steps corresponding to multiples of the stepsize time.
def get_steps_by_regular_time_interval(times, stepsize, max_time=None): if max_time is None: max_time = times[-1] chosen_times = arange(stepsize,max_time,stepsize) return get_steps_by_times(times,chosen_times)
[ "def cutInSteps(self,stepSize):\n\t\tres = []\n\t\t# if smaller still take it\n\t\tnum = max(1,int(self.duration/stepSize))\n\t\tfor i in range(num):\n\t\t\tnewE = self.copy()\n\t\t\tnewE.startTime = self.startTime+i*stepSize\n\t\t\tnewE.duration = stepSize\n\t\t\tres+=[newE]\n\t\treturn res", "def steps(duration...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of iteration steps (think indices) corresponding to the first time after each of the sorted eval_times.
def get_steps_by_times(times,chosen_times): time_steps = [] chosen_index = 0 for i in range(len(times)): if times[i] >= chosen_times[chosen_index]: time_steps.append(i) chosen_index += 1 if chosen_index == len(chosen_times): #We're done return time_steps if chosen_index != len(cho...
[ "def _get_run_times(self, now):\n run_times = []\n next_run_time = self.next_run_time\n while next_run_time and next_run_time <= now:\n run_times.append(next_run_time)\n next_run_time = self.trigger.get_next_fire_time(next_run_time, now)\n\n return run_times", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute operation "option" on the given item of the two files.
def compute_opt(filenames, item, option): if len(filenames) == 2: file1 = os.path.join(ROOT_DATA, filenames[0]) file2 = os.path.join(ROOT_DATA, filenames[1]) if not os.path.isfile(file1) or not os.path.isfile(file2): print("One of the given files '%s' or '%s' does not exists" % ...
[ "def merge(self, files1, files2, op_type):\n result = []\n\n if op_type == 'AND':\n # result = list(set(files1) & set(files2))\n p1 = 0\n p2 = 0\n s1 = int(math.sqrt(len(files1)))\n s2 = int(math.sqrt(len(files2)))\n while (p1 < len(fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the application using the gunicorn HTTP server.
def run_gunicorn_server(app): from gunicorn.app.base import Application class FlaskApplication(Application): def init(self, parser, opts, args): return { 'bind': '{0}:{1}'.format(FLASK_HOST, FLASK_PORT), 'workers': 4 } def load(self): ...
[ "def gunicorn():\n # fmt: off\n if os.name == \"nt\":\n print(\"Sorry, gunicorn is not available on windows\")\n exit(1)\n specter_gunicorn = SpecterGunicornApp(config=None)\n specter_gunicorn.run()", "def gunicorn(port, worker, mode):\n FlaskGunicorn(application).run()", "def run_g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute raw cypher queries
def cypher(self, query: str, **parameters: str) -> Any: try: # results, meta = db.cypher_query(query, parameters) results, _ = db.cypher_query(query, parameters) except CypherSyntaxError as e: log.warning(query) log.error(f"Failed to execute Cypher Query\...
[ "def run_query(self, query: str) -> BoltStatementResult:\n with self.neo4j_driver.driver.session() as session:\n return session.run(query)", "def cypher(self):\n kwargs = {'match': '',\n 'optional_match': '',\n 'where': '',\n 'with': '',\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Strip terms and clean up special characters. To be used in fuzzy search.
def sanitize_input(term: str) -> str: return term.strip().replace("*", "").replace("'", "\\'").replace("~", "")
[ "def remove_special_characters(self, txt: str) -> str:", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def clean_word(self, word):\n return self.filter_pattern.sub(u'', word.lower())", "def clean_text ( self, text ) :\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a tuple with joint states and TCP coordinates.
def __getitem__(self, item): # exclude tcp orientation return self._joint_states[item], self._tcp_coords[item][:2]
[ "def get_joint_state(self):\n joint_states = pybullet.getJointStates(self.robot, self.joint_indices[:self.n_ur5_joints])\n positions = []\n velocities = []\n for joint_state in joint_states:\n pos, vel, forces, torque = joint_state\n positions.append(pos)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. It tests whether the distribution of the difference x - y is symmetric about zero.
def wilcoxon_test(data): n = len(data) print(n) absolute_values = [] for d in data: absolute_values.append((d, np.abs(d))) absolute_values.sort(key=lambda x: x[1]) ret = [] for i, d in enumerate(absolute_values): ret.append((i + 1, d[0], d[1])) t_plus = 0 t_minus = ...
[ "def _wilcoxon(_sample_a, _sample_b):\n res = stats.ranksums(_sample_a, _sample_b)\n print('Wilcoxon rank-sum\\nstatistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def test_whiten_monotonic(self):\n gen = np.random.RandomState(seed=123)\n lo, hi = -1.2, +3....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the Mann-Whitney rank test on samples X and Y. It tests whether they have the same median.
def mann_whitney_u_test(X, Y): m, n = len(X), len(Y) U = 0 for x in X: for y in Y: if x < y: U += 1 E_u = m * n / 2 var_u = m * n * (m + n + 1) / 12 z = (U - E_u) / np.sqrt(var_u) p_value = 2. * norm.sf(abs(z)) # two sided test return z, p_value
[ "def _mann_whitney(_sample_a, _sample_b):\n res = stats.mannwhitneyu(_sample_a, _sample_b, use_continuity=True)\n print('Mann-Whitney rank test\\nU-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def mannwhitneyu(x, y):\n n1 = len(x)\n n2 = len(y)\n ranked =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the Fligner-Policello test on samples X and Y. It tests whether they have the same median, but without assumptions on the shape or scale of the distributions. However, it assumes that X and Y come from two different symmetric distributions.
def fligner_policello_test(X, Y): P_i = [] for x in X: count = 0 for y in Y: if y <= x: count += 1 P_i.append(count) Q_j = [] for y in Y: count = 0 for x in X: if x <= y: count += 1 Q_j.append(count)...
[ "def kolmogorow_smirnow_2sample_test(x, y):\n r = r_stats.ks_test(robjects.FloatVector(x), robjects.FloatVector(y),\n alternative='two.sided')\n return r[0][0], r[1][0]", "def kernel_two_sample_test(X, Y, permutations=10000):\n\n if X.shape[1] != Y.shape[1]:\n raise ValueErr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the path from the current working directory (cwd) to a root directory, and a path from that root directory to some file, derives the path from the cwd to that file.
def from_cwd(root, path): return normpath(join(root, normpath(path)))
[ "def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)", "def root_relative_path(path):\n if path.startswith(Env.current.root_dir):\n path = path[Env.current.root_dir_len:]\n return path", "def get_path(path...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the .ciignore file to get the set of ignored directories.
def get_ignored_dirs(ci_ignore_path): with open(ci_ignore_path, 'r') as ignore_file: return set([ normpath(line.strip()) for line in ignore_file.readlines() if not line.startswith('#') and not is_blank(line) ])
[ "def findIgnore():\n toIgnore = []\n try:\n with open(BUILD_IGNORE_FILENAME, \"r\") as fin:\n for line in fin:\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n toIgnore.append(BUILD_DIR + line.replace(\"/\", os.sep))\n excep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the timespan format used in the manifest.json format.
def parse_timespan(unparsed): pattern = '%H:%M:%S' return datetime.strptime(unparsed, pattern) - datetime.strptime('00:00:00', pattern)
[ "def parse_task_time(line):\n stripret = \"\".join(line.split())\n p = re.compile(r'\\d+\\.\\d{2}-\\d+\\.\\d{2}')\n findret = p.findall(stripret) \n if findret:\n formatstr = \" \".join(line.split())\n timeregx = r'\\d+\\.\\d{2}\\s*-\\s*\\d+\\.\\d{2}'\n time = re.compile(timeregx)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
—— Filter the on-the-hour UTC data (hours 00-23) from the file and store it, categorized, in a dictionary, as follows: —— Split the file contents into a two-dimensional list: the first level is a list made up of each line's data list, and the second level is each element of a line, split on tabs —— The first line of the data-list structure is ignored; from the second line on, data is filtered by the rules and stored into the dictionary —— Build a nested dictionary: the first-level key is the time, the second-level key is each element (header) of the list's first line, and the third level is the value under the corresponding time and header —— The data row (list element row) corresponding to an on-the-hour value cnt is: cnt * 60 + 2 —— New: at 10-minute intervals, find data where RVR is less than 50 and save it to files under the result/rvr_lost/ folder
def rvr_file_to_dict(file: str): data_line = [] day_rvr_dict = {} make_dir('result/rvr_lost/') with open(file, 'r') as f: for line in f.readlines(): data_line += [line.split('\t', 9)] # 9为RVR文件前9段数据有意义,最后一段为剩余所有数据,根据不同的文件进行更改 count = 0 for i in range(len(data_line)): ...
[ "def uadb_ascii_to_dataframe(file=''): \n \n if debug:\n print(\"Running uadb_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True) # TODO\n \n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n\n nmiss = 0\n search_h = False ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Increments the current scope. Returns `True` if successful, otherwise `False`.
def next_scope(self) -> bool: if self._scope_index + 1 >= len(self._scopes): return False self._scope_index += 1 self._current_scope = self._scopes[self._scope_index] return True
[ "def prev_scope(self) -> bool:\n if self._scope_index - 1 < 0:\n return False\n self._scope_index -= 1\n self._current_scope = self._scopes[self._scope_index]\n return True", "def isScopeActive(self, name):", "def hasScope(self, name):", "def enterScope(self, name):", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrements the current scope. Returns `True` if successful, otherwise `False`.
def prev_scope(self) -> bool: if self._scope_index - 1 < 0: return False self._scope_index -= 1 self._current_scope = self._scopes[self._scope_index] return True
[ "def leaveScope(self, name):", "def deleteScope():\n global currScope\n scopeStack.pop()\n currScope = scopeStack[-1]", "def leave_scope(self):\n self.symbols.pop()", "def del_scope(self):", "def scope_pop(self) -> None:\n self.scope_stack.popleft()", "def next_scope(self) -> bool:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the actions to be executed when the `phrase` is said or an empty list if the `phrase` isn't recognized.
def actions(self, phrase: str) -> list: return self._current_scope.get(phrase, [])
[ "def checkActions(phrase):\n matches = []\n for act in db['actions']:\n # Check if potential match\n if act['trigger_word'] in phrase:\n obj = act['function']()\n # If deeper analysis fails, ignore it\n if not obj.phraseMatch(phrase):\n continue\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the possible phrases that can be said in the current scope.
def phrases(self) -> set: return set(phrase for phrase in self._current_scope.keys())
[ "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def known_words():\n return '\\n'.join(known_words) + '\\n'", "def get_standard_phrases(self):\n language = profile.get(['language'], 'en-US')\n\n ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> rI.getRoadInformation("RandomRoad")  # should return 1,1
(1, 1)
def getRoadInformation(self,nameOfRoad): exist = getattr(self,'_hashMap',None) if (exist is not None) and (nameOfRoad in self._hashMap): return self._hashMap[nameOfRoad]['lanes'],self._hashMap[nameOfRoad]['length'] else: return 1,1
[ "def test_get_path_returns_tuple():\n result = flight_paths.get_path('Boston', 'London')\n assert result == (['Boston', 'London'], 3275.367430733415)", "def simpleObjPickRoad(obj, roads):\n # in here the obj (either union or terrace) consists of one building\n fittestRid = -1\n accessPoint = Point(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if folders for logs and DB exist; create the folders if they don't.
def check_if_dir_exists(): if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + ".." + os.sep + "logs"): try: os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + ".." + os.sep + "logs") logger.debug("Dir for logs has been created") except OSError: logger.debug(f"Cre...
[ "def checkFolders (self) :\n\n # obtiene la ruta del directorio /databases\n databases_dir = self.pt.getPathDatabasesDir()\n\n # si no existe el directorio, lo crea\n if not os.path.exists(databases_dir) :\n print 'El directorio /databases no existia, ha sido creado nuevamente...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether to allow the message given the current state of the guard
def allow(self, message): if message.author.id == Guard.AUTHOR: return True if message.author.id in Guard.BANNED_USERS: return False if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message): return False if self.state == State.SUDO_ONLY an...
[ "def should_answer_msg(self, msg):\n jid = self.get_real_jid(msg)\n if jid in self.acl.banned:\n return False\n if not self.require_membership:\n return True\n if self.msg_from_member(msg):\n return True\n return False", "async def check_message(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether in the circumstances of the given message, a sudo action is allowed
def allow_sudo(message): if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private: return True if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS: return True return False
[ "def allow(self, message):\n if message.author.id == Guard.AUTHOR:\n return True\n if message.author.id in Guard.BANNED_USERS:\n return False\n if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message):\n return False\n if self.state == State....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether, in the circumstances of the given message, the author is trusted.
def is_trusted(message): author = message.author if author.id == Guard.AUTHOR: return True if author.id in Guard.BANNED_USERS: return False try: if set([role.name for role in author.roles]).intersection(Guard.TRUSTED_ROLES): return True...
[ "def allow(self, message):\n if message.author.id == Guard.AUTHOR:\n return True\n if message.author.id in Guard.BANNED_USERS:\n return False\n if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message):\n return False\n if self.state == State....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether we have the specified permission when replying to the message
def has_permission(message, permission): if message.channel.type == discord.ChannelType.private: return True if getattr(message.channel.guild.me.permissions_in(message.channel), permission): return True return False
[ "def has_perm(self, user, perm):\r\n #superuser has all rights\r\n if user.is_superuser:\r\n return True\r\n if perm in [OI_READ, OI_ANSWER]:\r\n if self.project:\r\n return self.project.has_perm(user, perm)\r\n else:\r\n return Tru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the intent of the message, as defined by the Intent class
def get_intent(msg): if re.search(MapController.MAP_REGEX, msg.content) and client.user.id in msg.raw_mentions: return Intent.MAP elif re.match(Controller.KEY_REGEX, msg.content): return Intent.DIRECT else: return Intent.NONE
[ "def _intent(self) -> MessageIntent:\r\n pass", "def intent(self) -> str:\n return pulumi.get(self, \"intent\")", "def intent_name(self):\n return self._intent_name", "def get_intent_name(self):\n return self.request_data[\"queryResult\"][\"intent\"][\"name\"]", "def get_intent(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate a function as requiring sudo access
def privileged(f): @wraps(f) def wrapper(self, msg, *args, **kwargs): if not Guard.allow_sudo(msg): return return f(self, msg, *args, **kwargs) return wrapper
[ "def root_privileges_required(func):\n @functools.wraps(func)\n def wrapped_function(*args, **kwargs):\n \"\"\" Wrapper around func. \"\"\"\n if os.geteuid() != 0:\n exit_cli(\"Root privileges required for this operation\", fg=\"red\")\n return func(*args, **kwargs)\n\n wrap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an instance of a map controller based on the regex match object
def from_match(match): clat = float(match.group(1)) clng = float(match.group(2)) if match.group(3): mlat = float(match.group(3)) mlng = float(match.group(4)) else: mlat = mlng = None zoom = float(match.group(5)) return MapController(cla...
[ "def map(self, regex, response):\r\n self.url_map.insert(0, (re.compile(regex), response))", "def from_re_match(cls, match):\n kwargs = match.groupdict()\n location = kwargs['location'].split()\n kwargs['location'] = (int(location[0]), int(location[1]),\n int(l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the world map image. This method caches the result the first time it is called.
def get_world_image(cls): if cls.map_image is None: with open(str(Path(RES_PATH, cls.map_path)), 'rb') as infile: cls.map_image = Image.open(infile).convert('RGBA') return cls.map_image
[ "def createMap(self):\n # Create a byte array to receive the computed maps\n mapbytes = bytearray(self.mapconfig.SIZE_PIXELS * self.mapconfig.SIZE_PIXELS)\n \n # Get final map \n self.slam.getmap(mapbytes)\n\n return mapbytes", "def toworld(self, *args, **kwargs):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the marker image. This method caches the result the first time it is called.
def get_marker_image(cls): if cls.marker_image is None: with open(str(Path(RES_PATH, cls.marker_path)), 'rb') as infile: cls.marker_image = Image.open(infile).convert('RGBA').resize((32, 32)) return cls.marker_image
[ "def get_image(self):\n if not hasattr(self, '_BasePublication__image_cache'):\n images = self.get_images()\n self.__image_cache = images[0].picture if images else None\n return self.__image_cache", "def fetchImage(self):\r\n self.image_path = \"{}image_{}.jpg\".format(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sleep until the specified datetime
async def wait_until(dt):
    now = datetime.now()
    await sleep((dt - now).total_seconds())
[ "def sleep_until(end_date):\n if end_date <= datetime.datetime.now(datetime.timezone.utc):\n return\n time.sleep((end_date - datetime.datetime.now(datetime.timezone.utc)).total_seconds())", "def sleep_until(hour, minute=0):\n import datetime\n from time import sleep\n\n now = dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Schedule sending the status of the bot to the author's DM
async def schedule_status():
    while True:
        if controller.scheduled_status_date is not None:
            return
        controller.scheduled_status_date = datetime.now()+timedelta(hours=23)
        await wait_until(controller.scheduled_status_date)
        channel = await client.fetch_channel(Guard.AUTHOR_DM)
        ...
[ "async def status(self, msg, *args):\n content = self.get_status()\n await msg.channel.send(**{\n 'content': content,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "async def status_changer():\r\n playing = [discord.Game(name=\"wit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Schedule setting the status of the bot
async def schedule_activity():
    if controller.scheduled_activity_date is not None:
        return
    controller.scheduled_activity_date = datetime.now()+timedelta(seconds=30)
    await wait_until(controller.scheduled_activity_date)
    await client.change_presence(activity=discord.Activity(type=discord.ActivityType...
[ "async def schedule_status():\n while True:\n if controller.scheduled_status_date is not None:\n return\n controller.scheduled_status_date = datetime.now()+timedelta(hours=23)\n await wait_until(controller.scheduled_status_date)\n channel = await client.fetch_channel(Guard....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the command exists and is enabled
def is_enabled(command):
    if command not in Controller.commands:
        return False
    return Controller.commands[command][2]
[ "def checkIfEnabled(self):\n\n # Reload the command file to check for new commands\n importlib.reload(BotSettings)\n matches = BotSettings.config['commands']\n\n # Check for the match and if it is there return the value that goes with the command\n for key in matches:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the wikitext of the specified item. This method handles redirects as well.
async def get_wikitext(item):
    item = item.strip()
    url = Controller.WIKI_API_REV_URL + item
    response = await Controller.http_get(url)
    try:
        pages = json.loads(response)['query']['pages']
        key = list(pages.keys())[0]
        if key == '-1':
            raise V...
[ "def GetItemText(self, item):\r\n\r\n return item.GetText()", "def get_text(item_id):\n if item_id in all_items:\n return all_items[item_id]['text']\n return None", "def get_text(self, obj):\n return self.itemcget(obj, 'text')", "def list_item_text(item: str) -> str:\n\n\tif match(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the canonical title for the given title, if found
async def canonical_title(title):
    url = Controller.WIKI_API_SEARCH_URL + title
    response = await Controller.http_get(url)
    try:
        pages = json.loads(response)['query']['search']
        if len(pages) == 0:
            return None
        for page in pages:
            if ...
[ "def get_canonical_id_from_title(article):\n\n return urlparse.unquote(article).replace(' ', '_')", "def get_title(title: str):\n return title", "def normalize_title(title):\n if title is None:\n return None\n title = re.sub(r'[_-]', ' ', title)\n camel = re.compile('([A-Z][A-Z][a-z])|([a-...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with the wikilink for the specified item
async def link(self, msg, item=None, *args):
    if not Guard.has_permission(msg, 'embed_links'):
        await msg.channel.send(**{
            'content': 'Cannot send links on this channel',
            'reference': msg.to_reference(),
            'mention_author': True,
            'delete_af...
[ "def item_link(self, item):\n return item.get_absolute_url()", "def __transform_item_link(self, item: Dict[str, Any]) -> str:\n if item['external_link']:\n return item['external_link']\n else:\n return self.construct_alternate_link() + '/' + item['url']['url']", "def i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with the crafting recipe of the given item
async def recipe(self, msg, item=None, *args):
    if not Guard.has_permission(msg, 'embed_links'):
        await msg.channel.send(**{
            'content': 'I need embed_links permission to answer in this channel',
            'reference': msg.to_reference(),
            'mention_author': True,
            ...
[ "async def craft(ctx):\r\n item = arg(ctx).capitalize()\r\n if not char.char:\r\n await bot.say('Hey kid unless you\\'ve got magical hands you won\\'t be able to craft this item, come back when you\\'ve become a hero.')\r\n return\r\n elif check(cursor, 'craft', 'name', it...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the template name is a type of infobox
def is_infobox(self, name):
    name = name.strip()
    if name.lower().startswith('infobox'):
        return True
    if name == 'Armors_(NEW)':
        return True
    if name == 'All_inclusive_infobox_2020':
        return True
    if name.lower() == 'item':
        return True
    ...
[ "def is_template(self):\n\t\treturn bool(call_sdk_function('PrlFoundVmInfo_IsTemplate', self.handle))", "def is_template(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTemplate', self.handle))", "def is_template(self) -> bool:\n config = self.get_config()\n return \"template\" in config.key...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with the information from infobox of the specified item
async def info(self, msg, item=None, *args):
    if not Guard.has_permission(msg, 'embed_links'):
        await msg.channel.send(**{
            'content': 'I need embed_links permission to answer in this channel',
            'reference': msg.to_reference(),
            'mention_author': True,
            ...
[ "def OnInfoEdit(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n item = self.items[selections[0]]\r\n if self.gInfoBox.IsModified():\r\n self.data.setInfo(item,self.gInfoBox.GetValue())", "def getInfo(self,item):\r\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch and cache the trading table from the wiki
async def get_trading_table(self):
    if self.trading_table is None:
        self.trading_table = {}
        wikitext = await Controller.get_wikitext('Trading')
        for match in re.finditer(r"===='''([^']+)'''====\n({\|[^\n]*\n(?:[^\n]*\n)+?\|})", wikitext):
            place = match.group(1)
            ...
[ "def retrieve_table(self):\n if self.use_local_table:\n self.retrieve_table_local()\n else:\n self.retrieve_table_from_url()", "def retrieve_table_from_url(self):\n table_list = pd.read_html(self.url)\n self.raw_table = table_list[0]\n self.format_table_fro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with a list of places that trade for and from the item if the argument is an item name, and a list of possible trades if the argument is a location name. If the argument is empty, replies the user with the list of possible trading locations
async def trader(self, msg, arg=None, *args):
    trading_table = await self.get_trading_table()
    self_delete = False
    if not arg:
        content = '• '+'\n• '.join(place.capitalize() for place in trading_table.keys())
        content = f'Places you can trade:\n{content}'
    else:
        ...
[ "def request_item(date_in, loc_in, item_in, meal_in, requisites):\n secrets = get_secrets()\n url = secrets.get('m_dining_api_main')\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n date += str(date_in)\n url = url + location ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with a snapshot of the specified location
async def snapshot(self, msg, *args):
    if not Guard.has_permission(msg, 'attach_files'):
        await msg.channel.send(**{
            'content': 'Cannot send images on this channel',
            'reference': msg.to_reference(),
            'mention_author': True,
            'delete_after':...
[ "def get_snapshot(ctx, user_id, snapshot_id):\n\tlogger.info('Getting snapshot...')\n\tlogger.debug(f'{user_id=}, {snapshot_id=}')\n\treturn IOAccess.read_url(f'{ctx.obj[\"url_base\"]}/users/{user_id}/snapshots/{snapshot_id}', 'json',\n\t driver_kwargs=driver_kwargs)", "async def snapshot(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with the coordinates of the given place, as well as the snapshot and the URL
async def location(self, msg, place_name=None, *args):
    if not place_name:
        return
    if args:
        place_name = f'{place_name} {" ".join(args)}'
    if place_name.lower() in MapController.locations:
        lat, lng, size = MapController.locations[place_name.lower()]
        m...
[ "def coords2places():\n latitudes = request.args.getlist(\"latitudes\")\n longitudes = request.args.getlist(\"longitudes\")\n place_type = request.args.get(\"placeType\", \"\")\n # Get resolved place coordinate information for each coordinate of interest\n coordinates = []\n for idx in range(0, min(len(latitu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replies the user with the distance between the two place names mentioned
async def distance(self, msg, place1=None, place2=None, *args):
    if not place1 or not place2:
        return
    try:
        if place1.lower() not in MapController.locations:
            raise ValueError(place1)
        if place2.lower() not in MapController.locations:
            raise ...
[ "def _calculate_distance(place_lat, place_lon,\n location_lat, location_lon):\n print(f\"Calculating distance from place ({place_lat},{place_lon}) to location ({location_lat},{location_lon})\")\n return distance((place_lat, place_lon),\n (location_lat, location_lon))....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the activity of the bot
async def set_activity(self, msg, activity=None, *args):
    await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=activity))
[ "def setActivity(self, activity):\n self.__activity = activity", "async def set_activity(activity):\n\tkind, name = activity.split(\" \", maxsplit=1)\n\tkinds = {\n\t\t\"playing\" : ActivityType.playing,\n\t\t\"watching\" : ActivityType.watching,\n\t\t\"listening-to\" : ActivityType.listening,\n\t}\n\tawai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }