Dataset columns: query (string, lengths 9 to 9.05k); document (string, lengths 10 to 222k); negatives (list, lengths 19 to 20); metadata (dict).
Saves a list of organizations the user belongs to.
def save_organizations(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':organizations:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') end_cursor = ''.join([...
[ "def list_organizations():\n check_session()\n new_item = False\n if request.args(0):\n try:\n record = db((db.organization.id == request.args(0)) & (db.organization.created_by == auth.user.id)).select()[0]\n except:\n redirect(URL('default', 'list_organizations'))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves a list of repositories this user has pinned to their profile.
def save_pinned_repositories(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':pinnedRepositories:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') end_cursor ...
[ "def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstrea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves a list of public keys associated with this user.
def save_public_keys(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':publicKeys:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') end_cursor = ''.join(['"', ...
[ "def savePublicKeys(self, n):\n self.__allPublicKeys = [ self.__messageLog.pop(0) for _ in range(n) ]", "def save_keys(self):\n if self.public_key is not None and self.private_key is not None:\n try:\n # with open('wallet-{}.txt'.format(self.node_id), mode='w') as f:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves a list of pull requests associated with this user.
def save_pull_requests(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':pullRequests:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') end_cursor = ''.join(['...
[ "def _create_pull_request(self) -> None:\n url = \"{github_api_address}/repos/{owner}/{repo_name}/pulls\".format(\n github_api_address=self.project_details.git_repository.github_api_address,\n owner=self.project_details.git_repository.github_project_owner,\n repo_name=self.pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves a list of repositories that the user owns.
def save_repositories(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositories:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') end_cursor = ''.join(['"...
[ "def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstrea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves a list of repositories that the user recently contributed to, other than their own.
def save_repositories_contributed_to(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositoriesContributedTo:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') ...
[ "def save_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves a list of repositories that the user has starred.
def save_starred_repositories(self, user, path=None): # Redis has an end_cursor if we've collected this data before end_cursor = self.redis.get(''.join(['gh:', user.login, ':starredRepositories:endCursor'])) if end_cursor: end_cursor = end_cursor.decode('utf-8') end_curso...
[ "def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the given endpoint for the given domain. Only scan HTML elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument.
def discoverEndpoint(domain, endpoint, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True): if test_urls: ronkyuu.URLValidator(message='invalid domain URL')(domain) if content: result = {'status': requests.codes.ok, 'headers': None, ...
[ "def discoverTokenEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):\n return discoverEndpoint(domain, ('token_endpoint',), content, look_in, test_urls, validateCerts)", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the micropub endpoint for the given domain. Only scan HTML elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument.
def discoverMicropubEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True): return discoverEndpoint(domain, ('micropub',), content, look_in, test_urls, validateCerts)
[ "def search(wiki, pattern):\n wiki.search_tags(pattern)", "def find_band(args: Any) -> str:\n content = get_html_content(args, URL_RANDOM, \"Cannot get a random band\")\n if content:\n parser = BeautifulSoup(content, 'lxml')\n band_country = parser.select('#band_stats > dl > dd > a')[0].tex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the token endpoint for the given domain. Only scan HTML elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument.
def discoverTokenEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True): return discoverEndpoint(domain, ('token_endpoint',), content, look_in, test_urls, validateCerts)
[ "def search(wiki, pattern):\n wiki.search_tags(pattern)", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and enqueue a future. enqueue(args, resolve) -> source_id; resolve(source, resolve_args) -> None.
def source_create (self, resolve, cancel, enqueue, args = None): future, source = FutureSourcePair () def resolve_internal (*resolve_args): self.sources.discard (source) resolve (source, *resolve_args) return False # remove from event loop if cancel: ...
[ "def enqueue_call(self, func, args=None, kwargs=None, timeout=None, result_ttl=None): #noqa\n timeout = timeout or self._default_timeout\n job = Job.create(func, args, kwargs, connection=self.connection,\n result_ttl=result_ttl, status=Status.QUEUED)\n yield self.enqueue...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a .tab file into a 2D array. Separates meta info from data.
def readTab(file_name): data = [] meta = [] l=0 for line in open(file_name): if l<3: meta.append(line.strip("\n").split("\t")) else: if len(line.strip("\n").split("\t")) == len(meta[0]): data.append(line.strip("\n").split("\t")) l += 1 ...
[ "def readTAB(file_name):\r\n lst = []\r\n f = open(file_name)\r\n for line in f:\r\n tmp = line.rstrip('\\n')\r\n tmp = tmp.replace('\"', '')\r\n tmp = tmp.split('\\t')\r\n lst.append(tmp)\r\n f.close()\r\n # print 'Data read from: ' + file_name\r\n return lst", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if elem is a county name.
def isCountyName(elem): return (elem.attrib['k'] == "tiger:county")
[ "def identifyCounty(line):\n matches = re.findall('[a-zA-Z]', line)\n if len(matches) > 0 and ''.join(matches) != \"Total\":\n return True", "def is_cuisine(elem):\n return elem.attrib['k'] == 'cuisine'", "def is_named(ucs):\n try:\n return bool(unicodedata.name(ucs))\n except Value...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if this NestedInteger holds a single integer, rather than a nested list.
def isInteger(self): return isinstance(self.value, int)
[ "def isInteger(self):\n if not isinstance(self.val, list):\n return True\n else:\n return False", "def isInteger(self) -> bool:", "def isInteger(self):\n pass", "def isInteger(self):\n return _libsbml.ASTNode_isInteger(self)", "def is_int_list(self, lst):\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set this NestedInteger to hold a nested list and add a nested integer elem to it.
def add(self, elem: 'NestedInteger'): if self.value is None: self.value = [elem] elif self.isInteger(): self.value = [NestedInteger(self.value), elem] else: self.value = [*self.value, elem]
[ "def add(self, elem):\n if isinstance(elem, NestedInteger):\n self.val.append(elem)", "def __init__(self, value=None):\n # nested_list 中并不一定只有一个元素(有n个同级元素),每个都是 NestedInteger instance\n if value:\n self.val = value\n else:\n self.val = []", "def add(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set this NestedInteger to hold a single integer equal to value.
def setInteger(self, value: int): self.value = value
[ "def setInteger(self, value):\n self.val = value", "def setInteger(self, value):\n assert self._is_int is True\n self._value = value", "def setInteger(self, value):\n pass", "def x(self, value=None):\n if isinstance(value, (int, float)):\n self[0] = value\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the single integer that this NestedInteger holds, if it holds a single integer. Return None if this NestedInteger holds a nested list.
def getInteger(self): return self.value if self.isInteger() else None
[ "def getInteger(self):\n if self.isInteger():\n return self.val\n else:\n return None", "def getList(self):\n if not self.isInteger():\n return self.val\n else:\n return None", "def getList(self):\n return self.value if not self.isIn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the nested list that this NestedInteger holds, if it holds a nested list. Return None if this NestedInteger holds a single integer.
def getList(self): return self.value if not self.isInteger() else None
[ "def getList(self):\n if not self.isInteger():\n return self.val\n else:\n return None", "def __init__(self, value=None):\n # nested_list 中并不一定只有一个元素(有n个同级元素),每个都是 NestedInteger instance\n if value:\n self.val = value\n else:\n self.va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do activation scale calibration on the given light_graph
def main(light_graph, calibration_data, hw_specs, sw_config, sim_params, nodes_to_calibrate): with graph_collection.GraphCollection() as graph_coll: # Create calibration graph hist_coll = graph_coll.histogram_collection() convert_to_calib_graph = ...
[ "def _RunCalibration(self, graph_key, gdef, input_data, config):\n return self._RunGraph(graph_key, gdef, input_data, config, 30)", "def sensor_transform(self, activation):\r\n # rescale to (0, 1) interval, assuming activation is positive\r\n # return activation / (1 + activation)\r\n K, l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to send message to asset_index websocket channel. Asset Index (request) Retrieve a list of all available underlyings and the corresponding contract types and duration boundaries. If the user is logged in, only the assets available for that user's landing company will be returned.
def __call__(self, landing_company: Optional[str] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None): data = { "asset_index": int(1) } if landing_company: data['landing_company'] = str(landing_company) return self.send_websocket_request(se...
[ "def credit_index_handler():\n page_size, page_index = parse_list_args2()\n page_size = page_size or 10\n page_index = page_index or 1\n data, total = CreditBusiness.paginate_data(page_size, page_index)\n return json_list_render2(0, data, page_size, page_index, total)", "def api_asset_list():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ability to create an empty page
def test_create_page(self): self.assertEqual(self.client.get(reverse('home')).status_code, 404) page = Page.objects.create(**_page_data) self.assertEqual(page.title, _page_data['title']) self.assertEqual(page.page_type, _page_data['page_type']) response = self.client.get(rever...
[ "def test_can_create_page(self):\n pass", "def test_create_empty_page(self):\n\n url = reverse('api-pages-list')\n\n response = self.client.post(url, None, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_cannot_create_homepage(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ability to create a page with full header
def test_create_page_with_header(self): link_1 = PageHeadDropMenu.objects.create\ (internal_link='home', **_head_link_data) link_2 = PageHeadDropMenu.objects.create( external_link=_external_link, **_head_link_data) link_3 = PageHeadDropMenu.objects.create(**_head_link_da...
[ "def test_can_create_page(self):\n pass", "def create_page(self):", "def _create_page(self, body: Body):", "def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)", "def test_create_page_with_main_box...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ability to create a page with help box
def test_create_page_with_help_box(self): button = PageButton.objects.create(**_button_data) help_block = PageHelpBoxBlock.objects.create( button=button, **_help_box_data) page = Page.objects.create(help_box_block=help_block, **_page_data) response = self.client.get(reverse...
[ "def test_can_create_page(self):\n pass", "def test_help_menu(run):\n out, _err = run(dork.cli.help_menu)\n assert 'Help' in out, 'Help wasnt found'", "def test_helpful_page_view(self):\n target_url = url_for('dashboard.helpful_pages')\n redirect_url = url_for('users.login', next=targ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ability to create a page with main box
def test_create_page_with_main_box(self): main_block = PageMainBlock.objects.create(**_main_block_data) Page.objects.create(main_block=main_block, **_page_data) response = self.client.get(reverse('home')) self.assertEqual(response.status_code, 200) self.assertIn('text', respons...
[ "def test_can_create_page(self):\n pass", "def create_page(self):", "def test_cannot_create_homepage(self):\n root_page = Page.objects.get(pk=1)\n try:\n self.assertCanCreate(root_page, HomePage, nested_form_data({\n 'title': 'Home',\n }))\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ability to create a page with main footer
def test_create_page_with_footer(self): footer_block = PageFooterBlock.objects.create(**_footer_block_data) link_1 = PageHeadDropMenu.objects.create\ (internal_link='home', **_head_link_data) link_2 = PageHeadDropMenu.objects.create( external_link=_external_link, **_head...
[ "def test_build_footer(self):\n self.title = \"test\"\n empty_sheet = html_builder.HtmlSheet(self.title, None, \"author\")\n test_html = empty_sheet.build_footer(\"author.png\", \"http://author.com\", \"sponsor\", \"http://sponsor.com\")\n self.assertEqual(test_html[0], test_html_constan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ability to create a page with main what you need block
def test_create_page_with_whatyouneed_block(self): what_you_need_block = PageWhatYouNeedBlock.objects.create( **_whatyouneed_block_data) Page.objects.create(what_you_need_block=what_you_need_block, **_page_data) response = self.client.get(reverse('home')...
[ "def test_can_create_page(self):\n pass", "def test_create_page_with_main_box(self):\n\n main_block = PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(resp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns search results for the query obtained from the request args. Returns four separate variables containing results for artists, music, albums and records.
def search(): if not request.vars.search_term: redirect(URL('index')) term = request.vars.search_term origterm = term term = term.replace(' ','|') artists = db.executesql("select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsve...
[ "def full_search(self, query):\n return {\n 'artists': self.artist_search(query)['artists'],\n 'albums': self.album_search(query)['albums'],\n 'tracks': self.track_search(query)['tracks'],\n }", "def get_artists():\n return query_multiple(request.args, artist_sear...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This action is responsible for displaying all the information related to an artist
def artist(): if not request.vars.id: redirect(URL('index')) id = request.vars.id artistname = db.executesql("select m1.name from artist_name as m1, artist as m2 where m1.id = m2.name and m2.id = "+id+";") urls = db.executesql("select distinct(m2.url) from l_artist_url m1, url m2 where m2.id = m1.entity1 and m1.e...
[ "def view_all_artists(self) -> None:\n self.clear_window()\n with ArtGalleryDatabase(\"art_gallery.db\") as conn_cursor:\n conn_cursor.execute(\"\"\"SELECT * FROM Artists\"\"\")\n for row in conn_cursor.fetchall():\n data_record = f\"ArtistId: {row[0]} Nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract consecutive NERs from the result of CoreNLPNERTagger
def merge_ners(tokens): ners = list() merged_tokens = list() candid_entity = list() keep = False prev_tag = 'O' for i, (token, tag) in enumerate(tokens): if keep: if tag not in IGNORE_NER_TAG: candid_entity.append(token) keep = True ...
[ "def extract_ngrams(self, sequence):\n sequence = self.prefix + sequence + self.suffix\n for i, event in enumerate(sequence[self.n:], self.n):\n yield event, sequence[i-self.n: i]", "def ngram_tokenizer(sent, n):\n return ['<s>'] * (n-1) + sent.split() + ['</s>'] * (n>1)", "def get_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load freebase entity dictionary from saved dict
def load_freebase_entity(path="../data/freebase/dict.txt"): logger.info('Loading freebase entity dictionary...') name2id = dict() id2name = dict() with open(path, 'r', buffering=1024 * 1024 * 100) as f: for line in f: tokens = line.split('\t') _name = tokens[0].strip() ...
[ "def _load(self, load_dict):\n self._data_ = load_dict", "def map_dict(self, dict_entity):\n self.dict_entity = dict_entity\n Entity.map(self, self.dict_entity)", "def load(self, filenameordict):\n self.vocabulary = None\n if isinstance(filenameordict, (dict, shelve.Shelf)):\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return position of ner in list of tokens
def get_nerpos(tokens, ner): loc = list() for i, token in enumerate(tokens): if token == ner: loc.append(i) return loc
[ "def get_token_loc(token_list, PRP_MARKER):\n loc = -1\n for i,tokens in enumerate(token_list):\n\n for j,t in enumerate(tokens):\n loc += 1\n\n if (len(t) > 6) and (PRP_MARKER in t):\n l = t.index(PRP_MARKER)\n token_list[i][j] = t[:l] + t[l+len(PRP_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return positions of ners in list of tokens
def get_nerspos(tokens, ners): pos_list = list() for ner in ners: pos = get_nerpos(tokens, ner) pos_list.append(pos) return pos_list
[ "def get_nerpos(tokens, ner):\n\n loc = list()\n for i, token in enumerate(tokens):\n if token == ner:\n loc.append(i)\n return loc", "def get_token_loc(token_list, PRP_MARKER):\n loc = -1\n for i,tokens in enumerate(token_list):\n\n for j,t in enumerate(tokens):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an appropriate OTP for the current Vault version under test.
def get_generate_root_otp(): if vault_version_ge("1.10.0"): test_otp = "BMjzW3wAsEzINXCM05Wbas3u9zSl" elif vault_version_ge("1.0.0"): test_otp = "ygs0vL8GIxu0AjRVEmJ5jLCVq8" else: test_otp = "RSMGkAqBH5WnVLrDTbZ+UQ==" return test_otp
[ "def GetOTP(user):\r\n return _ComputeOTP(_GetUserSecret(user),\r\n long(time.time() / _GRANULARITY))", "def GetOTP(user):\n return _ComputeOTP(_GetUserSecret(user),\n long(time.time() / _GRANULARITY))", "def test_v1userbankotp_number(self):\n pass", "def test_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load test config file data for use by various test cases.
def load_config_file(filename): test_data_path = get_config_file_path(filename) with open(test_data_path) as f: test_data = f.read() return test_data
[ "def test_load_from_file(self):\n cf = ConfigFile()\n cf.load_from_file(TestConfigFile.TEST_CONFIG)\n\n self.assertEqual(4, len(cf))\n self.assertEqual(cf[\"key1\"], \"val1\")\n self.assertEqual(cf[\"key2\"], \"val2\")\n self.assertEqual(cf[\"key3\"], \"val3\")\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the path to a config file under the "tests/config_files" directory, i.e., the directory containing self-signed certificates, configuration files, etc. that are used for various tests.
def get_config_file_path(filename): # Use __file__ to derive a path relative to this module's location which points to the tests data directory. relative_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "config_files" ) return os.path.join(os.path.abspath(relative_path), f...
[ "def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path", "def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decode a newly generated root token via Vault CLI.
def decode_generated_root_token(encoded_token, otp): command = ["vault"] if vault_version_ge("0.9.6"): # before Vault ~0.9.6, the generate-root command was the first positional argument # afterwards, it was moved under the "operator" category command.append("operator") command.exten...
[ "def meraki_vault_r_secret(mount, path):\n read_secret_result = client.secrets.kv.v1.read_secret(path=meraki_vault_path, mount_point=vault_mount_point)\n api_token = read_secret_result['data']['token']\n return api_token", "def decode_jwt_from_console():\n import pyperclip\n response = json.loads(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to add `encoding='utf8'` to subprocess.Popen.
def get_popen_kwargs(**popen_kwargs): popen_kwargs["encoding"] = "utf-8" return popen_kwargs
[ "def set_terminal_encoding(encoding='utf_8'):\n sys.stdin = codecs.getreader(encoding)(sys.stdin)\n sys.stdout = codecs.getwriter(encoding)(sys.stdout)\n sys.stderr = codecs.getwriter(encoding)(sys.stderr)", "def unicode_arg(arg):\n if sys.stdin.encoding:\n return arg.decode(sys.stdin.encoding)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given file_path, save the model weights into a file of the given format.
def save_weights(self, file_path, format=None): _save_weights(self, file_path, format)
[ "def save_weights(self, file_path):\n with open(file_path, 'wb') as fp:\n pickle.dump(self.weights, fp)", "def save_model(self, weight_file):\n self.W.astype('float32').tofile(weight_file)\n print ('model saved to', weight_file)", "def save_model(self, file_name):\n\t\tself.model...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a LayerNode for this layer given input_tensors, output_tensors.
def _add_node(self, input_tensors, output_tensors): raise NotImplementedError
[ "def add_layer(inputs, in_size, out_size, activation_function=None):\n # add one more layer and return the output of this layer\n # x*W+b-->y\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n Wx_plus_b = tf.matmul(inputs, Weights) +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all trainable weights, as a list of trainable parameters.
def trainable_weights(self): self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True))) return self._trainable_weights
[ "def provide_weights(self):\n return []", "def get_weights(self):\n return self.mlp.get_weights()", "def get_weight_list(self) -> List[float]:\n return self._weight_list", "def list_all_weights_in_training_dir(self):\n loc = dirname(self.get_weights_file())\n return [f for f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all non-trainable weights, as a list.
def nontrainable_weights(self): return list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))
[ "def provide_weights(self):\n return []", "def trainable_weights(self):\n self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))\n return self._trainable_weights", "def prune_weights(self):\n pass", "def get_all_weights_bias(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the default form class used for user registration.
def get_form_class(self, request): return RegistrationForm
[ "def get_registration_form_class():\n custom_class = getattr(django_settings, 'REGISTRATION_FORM', None)\n if custom_class:\n return load_module(custom_class)\n else:\n return OpenidRegisterForm", "def get_form_class(self, request):\n return RegistrationForm", "def get_form_class(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a history of locations
def get_historic_location(): # noqa: E501 db = PostgresDB() historial = db.get_locations() if "Error" in historial: return jsonify(msg=historial) if len(historial) > 0: data = {"historial" : []} for row in historial: data['historial'].append( { ...
[ "async def historic(self) -> dict:\n return await self._request(\n \"get\", \"https://www.asthmaforecast.com/api/forecast/historic/asthma\"\n )", "def get_historic_data(self):\n\n historic_market_events = []\n\n return historic_market_events", "def reportLocations(self,rem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a model from Keras to TensorFlow
def keras_to_tensorflow( keras_model, output_dir: Path, model_name, out_prefix="output_", log_tensorboard=True, ): if not output_dir.exists(): output_dir.mkdir(parents=True, exist_ok=True) output_dir: str = str(output_dir) out_nodes = [] for i in range(len(keras_model.outpu...
[ "def model_keras(self) -> tf.keras.Model:", "def convert_to_tf_record(_):\n\n mnist = input_data.read_data_sets(\n \"/tmp/tensorflow/mnist/input_data\",\n reshape=False\n )\n\n convert_to(mnist.validation, 'validation', FLAGS.data_directory)\n convert_to(mnist.train, 'train', FLAGS.data_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
As the method name suggests, this returns the up-to-date station information.
def get_current_station_info(cls, dbsession): sub = dbsession.query(UsageData.station_id, func.max(UsageData.id).label('max_update')).group_by( UsageData.station_id).subquery() return dbsession.query( UsageData.last_update, UsageData.available_bike_stands, UsageData....
[ "async def _current_station_data(self) -> None:\n endpoint = f\"observations/station/{self._station_id}?token={self._token}\"\n json_data = await self.async_request(\"get\", endpoint)\n\n station_name = json_data.get(\"station_name\")\n\n cnv = ConversionFunctions()\n items = []\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of bike availability for a provided weekday and station, averaged per hour, so 24 results.
def get_bikes_for_weekday(cls, dbsession, weekday, station_id): station = [("Time", "Available Bikes", "Available Stands")] station_data = dbsession.query(func.hour(cls.last_update), func.avg(cls.available_bikes), func.avg(...
[ "def get_hourly(station_id):\n hourdata = db.session.query(func.avg(DublinBike.available_bike)) \\\n .filter(DublinBike.number == station_id) \\\n .group_by(extract('hour', DublinBike.localtime)) \\\n .order_by(extract('hour', DublinBike.localtime)) \\\n .all()\n return jsonify([\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds days where there was wet weather.
def findWetWeatherDays(self, dbsession, today): wetDays = dbsession.query(self.dt).filter(or_(self.weather_description == "light rain", self.weather_description == "moderate rain")).all() # if one of those days is today return it. # else just return a wet day. for i in range(len(wetDays)...
[ "def getTodayInformation():\n curr_date = datetime.now()\n curr_day = workingdays_passed = 0\n is_weekday = False\n for week in calendar.Calendar().monthdayscalendar(curr_date.year, curr_date.month):\n for i, day in enumerate(week):\n if day != 0:\n curr_day += 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For two participants, at most one channel can be opened
def test_max_1_channel( token_network: Contract, get_accounts: Callable, create_channel: Callable ) -> None: (A, B) = get_accounts(2) create_channel(A, B) with pytest.raises(TransactionFailed, match="TN/open: channel exists for participants"): token_network.functions.openChannel(A, B).call() ...
[ "def single_channel():\n return True", "def joined(self, channel):", "def open_channel(self):\n # print('Creating a new channel') #debug\n return self._connection.channel()", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_membe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getParticipantsHash() behaves as get_participants_hash
def test_participants_hash(token_network: Contract, get_accounts: Callable) -> None: (A, B) = get_accounts(2) AB_hash = get_participants_hash(A, B) assert token_network.functions.getParticipantsHash(A, B).call() == AB_hash assert token_network.functions.getParticipantsHash(B, A).call() == AB_hash
[ "def get_hash(self, composition):\n return", "def test_participants_hash_equal(token_network: Contract, get_accounts: Callable) -> None:\n (A,) = get_accounts(1)\n\n with pytest.raises(ValueError):\n get_participants_hash(A, A)\n with pytest.raises(TransactionFailed, match=\"TN: identical a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getParticipantsHash() behaves as get_participants_hash on equal addresses
def test_participants_hash_equal(token_network: Contract, get_accounts: Callable) -> None: (A,) = get_accounts(1) with pytest.raises(ValueError): get_participants_hash(A, A) with pytest.raises(TransactionFailed, match="TN: identical addresses"): token_network.functions.getParticipantsHash(A...
[ "def test_participants_hash(token_network: Contract, get_accounts: Callable) -> None:\n (A, B) = get_accounts(2)\n\n AB_hash = get_participants_hash(A, B)\n assert token_network.functions.getParticipantsHash(A, B).call() == AB_hash\n assert token_network.functions.getParticipantsHash(B, A).call() == AB_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a vdd rail at the top of the cell
def route_vdd_rail(self): # adds the rail across the width of the cell vdd_position = vector(0, self.height - self.m1_width) self.add_rect(layer="metal1", offset=vdd_position, width=self.width, height=self.m1_width) pmos_...
[ "def add_vdd_rail(self):\n # adds the rail across the width of the cell\n vdd_position = vector(self.pclk_position.x,\n self.height - drc[\"minwidth_metal1\"])\n self.add_layout_pin(text=\"vdd\",\n layer=\"metal1\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create both the upper_pmos and lower_pmos in the module
def create_ptx(self): self.lower_pmos_inst=self.add_inst(name="lower_pmos", mod=self.pmos) self.connect_inst(["bl", "en", "br", "vdd"]) self.upper_pmos1_inst=self.add_inst(name="upper_pmos1", mod=self.pmos) ...
[ "def add_ptx(self):\n # adds the lower pmos to layout\n base = vector(self.width - self.temp_pmos.width, 0).scale(0.5,0)\n self.lower_pmos_position = base + vector([drc[\"metal1_to_metal1\"]]*2)\n self.add_inst(name=\"lower_pmos\",\n mod=self.lower_pmos,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Place both the upper_pmos and lower_pmos in the module
def place_ptx(self): # Compute the other pmos2 location, but determining offset to overlap the # source and drain pins self.overlap_offset = self.pmos.get_pin("D").ll() - self.pmos.get_pin("S").ll() # adds the lower pmos to layout #base = vector(self.width - 2*self.pmos...
[ "def add_ptx(self):\n # adds the lower pmos to layout\n base = vector(self.width - self.temp_pmos.width, 0).scale(0.5,0)\n self.lower_pmos_position = base + vector([drc[\"metal1_to_metal1\"]]*2)\n self.add_inst(name=\"lower_pmos\",\n mod=self.lower_pmos,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects the upper and lower pmos together
def connect_poly(self): offset = self.lower_pmos_inst.get_pin("G").ll() # connects the top and bottom pmos' gates together ylength = self.upper_pmos1_inst.get_pin("G").ll().y - offset.y self.add_rect(layer="poly", offset=offset, width=self.pol...
[ "def connect_poly(self):\n offset = (self.lower_pmos_position \n + self.lower_pmos.poly_positions[0]\n + vector(0,self.lower_pmos.poly_height))\n # connects the top and bottom pmos' gates together\n ylength = (self.upper_pmos_position.y \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the en input rail, en contact/vias, and connects to the pmos
def route_en(self): # adds the en contact to connect the gates to the en rail on metal1 offset = self.lower_pmos_inst.get_pin("G").ul() + vector(0,0.5*self.poly_space) self.add_contact_center(layers=("poly", "contact", "metal1"), offset=offset, ...
[ "def __connect(self, sinaps, inputNeuron, outputNeuron):\n sinaps.outNeuron = outputNeuron\n sinaps.inputNeuron = inputNeuron\n outputNeuron.addInputSinaps(sinaps)\n inputNeuron.addOutputSinaps(sinaps)", "def connectionMade(self):\n super().connectionMade()\n\n # negociat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds both bitline and bitlinebar to the module
def route_bitlines(self): # adds the BL on metal 2 offset = vector(self.bitcell.get_pin(self.bitcell_bl).cx(),0) - vector(0.5 * self.m2_width,0) self.add_layout_pin(text="bl", layer="metal2", offset=offset, width...
[ "def add_bitlines(self):\n # adds the BL on metal 2\n offset = vector(self.bitcell.get_pin(\"BL\").cx(),0) - vector(0.5 * drc[\"minwidth_metal2\"],0)\n self.add_layout_pin(text=\"BL\",\n layer=\"metal2\",\n offset=offset,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds contacts/via from metal1 to metal2 for bitlines
def add_bitline_contacts(self): stack=("metal1", "via1", "metal2") pos = self.lower_pmos_inst.get_pin("S").center() self.add_contact_center(layers=stack, offset=pos) pos = self.lower_pmos_inst.get_pin("D").center() self.add_contact_center(layers=s...
[ "def add_bitline_contacts(self):\n # helps centers the via over the underneath contact\n self.xcorrect_upper = 0.5 * abs(self.upper_contact.width \n - self.upper_pmos.active_contact.width)\n self.xcorrect_lower = 0.5 * abs(self.lower_contact.width \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect pmos pin to bitline pin
def connect_pmos(self, pmos_pin, bit_pin): ll_pos = vector(min(pmos_pin.lx(),bit_pin.lx()), pmos_pin.by()) ur_pos = vector(max(pmos_pin.rx(),bit_pin.rx()), pmos_pin.uy()) width = ur_pos.x-ll_pos.x height = ur_pos.y-ll_pos.y self.add_rect(layer="metal2", of...
[ "def add_pins(self):\n\n for bit in range(self.addr_size):\n self.add_pin(\"addr_{0}\".format(bit),\"INPUT\")\n \n self.add_pin(\"wl_en\", \"INPUT\")\n\n for bit in range(self.num_rows):\n self.add_pin(\"wl_{0}\".format(bit),\"OUTPUT\")\n \n self.add_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change first convolution layer input channels.
def patch_first_conv(model, in_channels): # get first conv for module in model.modules(): if isinstance(module, nn.Conv2d): break # change input channels for first conv module.in_channels = in_channels weight = module.weight.detach() reset = False if in_channels == 1: ...
[ "def patch_first_conv(model, in_channels):\n\n # get first conv\n for module in model.modules():\n if isinstance(module, nn.Conv2d):\n break\n\n # change input channels for first conv\n module.in_channels = in_channels\n weight = module.weight.detach()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if user's answer matches with answer from database.
def is_correct_answer(answer): db_answer = Answer.objects.get(id=int(list(answer.keys())[0])) return db_answer.is_correct == bool(list(answer.values())[0])
[ "def check(self, answer):\n return self.answer == answer", "def check_if_correct(self, guess):\n answer = MCQAnswer.objects.get(id=guess)\n\n if answer.correct is True:\n return True\n else:\n return False", "def checkanswer(cls, username, answer):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the emission_rest_wavelengths parameter is present, return a nebular emission line spectrum. Currently uses several approximations for the velocity broadening. Currently does not affect photometry. Only provides samples of the nebular spectrum at outwave, so will not be correct for total power unless outwave densely...
def nebular(self, params, outwave): if 'emission_rest_wavelengths' not in params: return 0. mu = vac2air(params['emission_rest_wavelengths']) # try to get a nebular redshift, otherwise use stellar redshift, # otherwise use no redshift a1 = params.get('zred_emission',...
[ "def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_ra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the parameters, recording whether it was new for the ssp or basis parameters. If either of those changed, regenerate the relevant spectral grid(s).
def update(self, newparams): for k, v in list(newparams.items()): if k in self.basis_params: # Make sure parameter is in dict, and check if it changed if k not in self.params: self.basis_dirty = True self.params[k] = v ...
[ "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update_params (self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebuild the component spectra from the SSPs. The component spectra include dust attenuation, redshifting, and spectral regridding. This is basically a proxy for COMPSP from FSPS, with a few small differences. In particular, there is interpolation in metallicity and the redshift and the output wavelength grid are taken ...
def build_basis(self): if self.debug: print('sps_basis: rebuilding basis') # Setup the internal component basis arrays inwave = self.ssp.wavelengths nbasis = len(np.atleast_1d(self.params['mass'])) self.nbasis = nbasis # nbasis = ( len(np.atleast_1d(self.param...
[ "def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,\n label=None, picks=None, pick_ori=None, verbose=None):\n\n is_free_ori, _, proj, vertno, G =\\\n _prepare_beamformer_input(info, forward, label, picks, pick_ori)\n\n Cm = data_csd.data\n\n # Calculating regularized...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP) Transform and align a face in an image.
def align(imgDim, rgbImg, landmarks, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, skipMulti=True): assert imgDim is not None assert rgbImg is not None assert landmarks is not None #if bb is None: # bb = self.getLargestFaceBoundingBox(rgbImg, skipMult...
[ "def align_face(self, image_dimensions, image, bounding_box=None,\n landmarks=None, landmark_indices=INNER_EYES_AND_BOTTOM_LIP,\n skip_multi=False):\n assert image_dimensions is not None\n assert image is not None\n assert landmark_indices is not None\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a bs4 object containing all the tags in the doc at the URL
def _grab_tags(self, url): a = self._api_request(url) return bs4.BeautifulSoup(a,features="html.parser")
[ "def _grab_tags(self, url):\n obj = self._api_get(url).text\n return bs4.BeautifulSoup(obj, features=\"html.parser\")", "def soupify(url):\n raw_html = requests.get(url).content\n return bs(raw_html, 'html.parser')", "def get_soup(url: str):\n return BeautifulSoup(requests.get(url).conten...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the workspace zip for the specific build URL by parsing HTML. The API has no way of retrieving the workspace zip, AFAIK.
def get_workspace_zip(self): workspace_api = '/ws/' # print("Checking Workspaces For: {}".format(self.url)) workspace_elements = self._grab_tags(self.url + workspace_api) workspace_links = [] root_domain = urllib.parse.urlparse(self.url).scheme + '://' + urllib.parse.urlparse(sel...
[ "def get_workspace_zip(self):\n workspace_api = \"/ws/\"\n # print(\"Checking Workspaces For: {}\".format(self.url))\n workspace_elements = self._grab_tags(self.url + workspace_api)\n workspace_links = []\n root_domain = (\n urllib.parse.urlparse(self.url).scheme\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively search through all jobs and projects to pull out build URLs
def get_all_build_links(url, auth=None, netloc_force=False): all_build_links = [] if 'api/json' not in url: # if the api endpoint isnt appended, then append it: url += '/api/json/' def recurse_to_build(url): orig_url = urllib.parse.urlparse(url) try: json_reply = ...
[ "def get_all_build_links(url, auth=None, netloc_force=False):\n all_build_links = []\n if \"api/json\" not in url:\n # if the api endpoint isnt appended, then append it:\n url += \"/api/json/\"\n\n def recurse_to_build(url):\n orig_url = urllib.parse.urlparse(url)\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the descriptor settings from the config file; only HOG is supported.
def load_descriptor(settings): descriptors_map = { 'hog': descriptors.HogDescriptor.from_config_file(settings['hog']), } # Default to the HOG descriptor instance (not the string 'hog') for invalid input return descriptors_map.get(settings['train']['descriptor'], descriptors_map['hog'])
[ "def configure(self):\n self._validate_cfg()\n\n if not self.scripts_path:\n # If scripts directory is not provided, see if there is a \"scripts\"\n # directory next to the descriptor. If found - assume that's the\n # directory containing scripts.\n scripts ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator which yields all files in the given directories with any of the EXTENSIONS.
def get_files(dirs): for dir in dirs: for root, _, files in os.walk(dir): for file in files: path = Path(os.path.join(root, file)) if path.suffix in EXTENSIONS: yield path
[ "def _iter_data_extensions(directory):\n for _, _, files in os.walk(directory):\n for path in files:\n _, extension = os.path.splitext(path)\n\n if extension not in _PYTHON_EXTENSIONS:\n yield \"*\" + extension", "def file_iter(root_path, extensions):\n ret = []\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the hash preprocessors of the state and the action, in order to make them hashable.
def _initialize_hash(self): # action if isinstance(self.env.action_space, gym.spaces.Discrete): self._hash_action = lambda x: x elif isinstance(self.env.action_space, gym.spaces.Box): if self.__class__.__name__ == "MCTS": raise Exception("Cannot run vanil...
[ "def hash_functions(self):\n pass", "def state_hash(self, s):\n pass", "def create_hash_functions(self):", "def _state_actions(self) -> dict:\n return {}", "def build_preprocessors(md_instance, **kwargs):\r\n preprocessors = odict.OrderedDict()\r\n if md_instance.safeMode != 'esca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Explores the current tree with the UCB principle until we reach an unvisited node where the reward is obtained with random rollouts.
def grow_tree(self): decision_node = self.root internal_env = copy.copy(self.env) while (not decision_node.is_final) and decision_node.visits > 1: a = self.select(decision_node) new_random_node = decision_node.next_random_node(a, self._hash_action) (new_d...
[ "def UCT(rootstate, itermax, verbose = False):\n\n tree.clear()\n # TODO slide to the next position keeping the using tree benefiting from the subtree playouts.\n rootnode = Node(state = rootstate)\n\n for i in range(itermax):\n #print(i)\n node = rootnode\n state = rootstate.Clone(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a RandomNode, returns a DecisionNode
def select_outcome(self, env, random_node): new_state_index, r, done, _ = env.step(random_node.action) return DecisionNode(state=new_state_index, father=random_node, is_final=done), r
[ "def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
At the end of the simulations, returns the most visited action
def best_action(self): number_of_visits_children = [node.visits for node in self.root.children.values()] index_best_action = np.argmax(number_of_visits_children) a = list(self.root.children.values())[index_best_action].action return a
[ "def chooseAction(self, gameState):\n probabilities = self.assignProbablities(gameState)\n #print probabilities\n prob, bestProbabilityAction = max(probabilities)\n return bestProbabilityAction", "def best_sequence(self):\n max_cumulative = max(self.reward_history)\n for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.
def from_expression( cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None ) -> Step: ctes = ctes or {} expression = expression.unnest() with_ = expression.args.get("with") # CTEs break the mold of scope and introduce themselves to all in the context. ...
[ "def expression_to_sql(self, expr, state):\n expr_type = type(expr)\n if expr_type == parser.Literal:\n return state.get_literal_parameter(expr.value)\n elif expr_type == parser.Parameter:\n value = self.get_parameter_value(expr.name)\n return state.get_literal_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will continue splitting the tree until every leaf node is pure and the training data is perfectly characterized by the decision tree
def train(self): max_tuple = self.max_gain() # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop while max_tuple.gain != 0: max_tuple.node.split(max_tuple.attribute) max_tuple = self.max_gain()
[ "def train(self):\r\n stack_nodes = [self.root]\r\n # !!! Write code to train decision tree. If the node is pure, set the majority_class attribute.\r\n # Use .pop(0) to pop the top of the stack\r\n for data in stack_nodes:\r\n self.split_attr, self.split_value =compute_best_sp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This splits a node on the attribute "attribute"
def split(self, attribute): if attribute not in self.attributes: raise KeyError('Attribute not present in node') self.split_attr = attribute # list() is used to make a copy of the list instead of pointing to the same list child_attributes = list(self.attribu...
[ "def test_splitter_force_attr_fail_attr():\n split_attribute = 'foobar'\n\n input_items = list()\n input_items.extend([TestItem('ref0', 'value%s' % i) for i in range(10)])\n input_items.extend([TestItem('ref1', 'value%s' % i) for i in range(5)])\n\n _ = get_splitter(input_items, 0, split_attribute).s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort and retrieve the top rows of df
def get_top_recipes(df, sort_params=None, count=10): if not sort_params: logging.warning("Column names to sort by are not defined.") return df return df.sort_values(sort_params["names"], ascending=sort_params["order"]).head(count)
[ "def top_player_points(df):\n return df.sort_values(\"total_points\", ascending=False)", "def _top_k(self, args):\n data, row = args\n data, row = zip(*sorted(zip(data, row), reverse=True)[:self.k])\n return data, row", "def top_n_rows(dataframe, rownumber):\n df = dataframe.head(rown...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Parse the JSON object, extract name, headline, prepTime, ratingsCount, favoritesCount, nutrition, and export to a CSV file. 2. Retrieve the top 10 recipes based on ratingsCount and favoritesCount and export to a CSV file.
def read_recipes(year, week): # read config file cp = ConfigParser() cp.read("config.ini") # load menu data fname_json = cp["other"]["json_out_fname"] if not os.path.exists(fname_json): logging.error("JSON file not found.") return with open(fname_json) as f: m...
[ "def parse_search_response(response):\n\n recipes = []\n matches = response['matches']\n for recipe in matches:\n thumbnails = []\n if 'smallImageUrls' in recipe.keys():\n thumbnails = recipe['smallImageUrls']\n\n site = ''\n if 'sourceDisplayName' in recipe.keys():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the test has passed by comparing its stdout to what is expected.
def check_test(self, test): (stdout, stderr) = (out.decode('ascii').strip() for out in test.process.communicate()) self.assertEqual(stderr, "") self.assertEqual(stdout, EXPCT_RESULTS[test.number], "Test {} failed".format(test.number)) ...
[ "def testStdoutAndStderr(self):\n with self.OutputCapturer():\n print('foo')\n print('bar', file=sys.stderr)\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)", "def assertStdout(self, output):\n self.failUnlessEqual(self.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the next test.
def start_next_test(self): next_test_num = self.test_numbers.popleft() self.tests.append( self.TEST( process=Popen(COMMANDS[next_test_num], stdout=PIPE, stderr=PIPE), number=next_test_num))
[ "def start_test_run(self):", "def test_run_started(self):", "def testSimStart(self):\n\t\tpass", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))", "def test_start(self, testname):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Poll tests for completion. When one finishes, start another one if there are more to run. Stop when all are finished.
def poll_tests(self): for test in list(self.tests): if test.process.poll() is not None: self.check_test(test) self.tests.remove(test) if self.test_numbers: self.start_next_test()
[ "def _wait_for_all_operations_done(self):\n while self._test_names_to_processes:\n time.sleep(10)\n running_test_names = list(self._test_names_to_processes.keys())\n for test_name in running_test_names:\n running_proc = self._test_names_to_processes.get(test_name)\n return_code = run...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the tests to be run. These may be given as a single number, a comma-separated list, or two numbers separated by a dash.
def parse_tests(tests_input): if '-' in tests_input: limits = tests_input.partition('-') tests = list(range(int(limits[0]), int(limits[2]) + 1)) else: tests = [int(t) for t in tests_input.split(',')] return tests
[ "def run_and_parse(test_description: Tuple[str, str, List[str]]):\n test_executable, test_name, performance_counters = test_description\n try:\n test_output = run_test(test_executable, test_name, performance_counters)\n print(f'Finished running test {test_name}', file=sys.stderr)\n return (test_name, par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find which aggregator will be used, according to the CLI args
def setup(self, args): for key, ags in self._mapp.items(): arg = args.get(key) if arg: # if the arg exists, mark the aggregator active and return a new instance of the aggregator class self.active = True return ags(arg)
[ "def aggregator_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"aggregator_name\")", "def aggregator_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"aggregator_name\")", "def aggregator_name(self) -> str:\n return pulumi.get(self, \"aggregator_name\")", "def han...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used by the Crawler class; appends a line to the aggregator instance configured in setup().
def append(self, line): self.ag.append(line)
[ "def do_add_node(self, line=''):\n self.fibbing.add_node()", "def log(self, line):\n self.body.append(line)", "def __log_line(self, line):\n logger.info(\"CM: %s\" % line)\n self.log.append(line)\n if len(self.log) > self.log_lines:\n self.log = self.log[-(self.log_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Release the module to PyPI.
def release_pypi(): local('python setup.py clean sdist register upload')
[ "def release():\n\n from secrets import pypi_auth\n\n # Check that all changes are committed before creating a new version\n git_check()\n\n # Test package\n test()\n\n # Increment version\n inc_version()\n\n # Commit new version, create tag for version and push everything to origin\n git...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pylint and PEP8 QA report generator. We use subprocess instead of local because pylint and pep8 don't return a zero exit code; this behaviour is incompatible with Fabric...
def release_qa(): lines = StringIO.StringIO(local('find . -name "*.py"', capture=True)) for line in lines.readlines(): print "PYLINT CHECK" print "-----------------------" pyfile = os.path.normpath(line).replace("\n","").replace("\r","") reportfilename = pyfile.replace("....
[ "def main():\n\n parser = argparse.ArgumentParser(\n description=\"Pylint to Pycharm message converter. \"\n \"Additional arguments starting with '--' are forwarded to Pylint.\"\n )\n parser.add_argument(\"-v\", \"--virtualenv\", help=\"path to virtual environment\")\n parser.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Installs and configures a fresh DIRAC UI (VO-specific).
def install_ui(): # pick which VO I want to test, default gridpp print "Which VO do you want to test (default: gridpp) ?" user_VO = raw_input("Your choices are: gridpp, lz, lsst, solidexperiment.org, skatelescope.eu: ") \ or "gridpp" if user_VO not in ["gridpp", "lz", "lsst", "solidexperiment.org", "skate...
[ "def install():\n addKey()\n addAdmin()\n addGuideline()", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called before anything else, i.e. just after installer controller creation. Return value is ignored.
def pre_installation(self): pass
[ "def preInit():\n\treturn None", "def _pre_init(self):\n pass", "def pre_command(self):\r\n pass", "def test_initialization(self, create_controller: Controller) -> None:\n pass", "def pre_start_hook(self):\n\n LOG.debug(_('XManager pre_start_hook...'))\n\n pass", "def _b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called before any files have been downloaded.
def pre_download(self, remote_files): pass
[ "def download_and_prepare(self):\n self._download_and_prepare()", "def OnDownloadBegin(self):", "def download_files(self):", "def post_download(self, remote_files):\n pass", "def OnDownloadComplete(self):", "def initialize_sync(self):\n self.donwnloader.download_all()\n self.upload...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called after all files have been downloaded.
def post_download(self, remote_files): pass
[ "def download_finish(self, cloud_file):", "def pre_download(self, remote_files):\n pass", "def OnDownloadComplete(self):", "def download_files(self):", "def _clear_downloaded() -> None:\n if DOWNLOAD_DIRECTORY.is_dir(): # If the download directory exists\n for file in os.listdir(DOWNLOAD_D...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called before the installation of any pkg.
def pre_install(self, installable_pkgs): pass
[ "def pre_install_pkg(self, installable_pkg):\n pass", "def pre_installation(self):\n pass", "def post_install(self, installable_pkgs):\n pass", "def post_install_pkg(self, installable_pkg):\n pass", "def install(self):\r\n if not self.installed:\r\n sys.settrace...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called before the installation of the given installable pkg.
def pre_install_pkg(self, installable_pkg): pass
[ "def pre_install(self, installable_pkgs):\n pass", "def post_install_pkg(self, installable_pkg):\n pass", "def post_install(self, installable_pkgs):\n pass", "def pre_installation(self):\n pass", "def setPkgRequired(self, *args):\n return _libsbml.SBMLDocument_setPkgRequir...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called after the successful installation of the given installable pkg.
def post_install_pkg(self, installable_pkg): pass
[ "def post_install(self, installable_pkgs):\n pass", "def pre_install_pkg(self, installable_pkg):\n pass", "def _handle_pkg_inst_extras(self, package: Package):\n # TODO: This method should be removed after transition to use of\n # Package.handle_inst_extras()\n msg_src =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called after the successful installation of all pkgs.
def post_install(self, installable_pkgs): pass
[ "def post_install_pkg(self, installable_pkg):\n pass", "def post_installation(self, exc_value):\n pass", "def pre_install(self, installable_pkgs):\n pass", "def pre_install_pkg(self, installable_pkg):\n pass", "def run_install():\r\n pass", "def complete_setup(self, app):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called after everything else (only called if pre_installation returned successfully). exc_value is None if there was no error, else the exception value.
def post_installation(self, exc_value): pass
[ "def check_for_setup_error(self):", "def pre_handle_error(self, exception):\n pass", "def postcondition(self, result, exc_info, *args, **kwargs):\n pass", "def pre_installation(self):\n pass", "def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From the list of packages known to be upgraded, return a list of tuples (installed_pkg, installable_pkg, [], []) such that neither list contains the installed pkg's own package. Also, if a new package to install is found in more than one list, it is discarded from the later list.
def preprocess_upgrade_list(self, upgrade_list): return [(ed_pkg, able_pkg, [], []) for (ed_pkg, able_pkg) in upgrade_list]
[ "def get_new_installed_pkgs(self):\n old_pkgs = self.installed_packages\n new_pkgs = self.list_installed_packages()\n has_no_prefix = lambda candidate, pklist: len(list(filter(lambda z: z in candidate and z != candidate, pklist))) == 0\n return list(filter(\n lambda x: x not ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies the model parameters of one estimator to another.
def copy_model_parameters(sess, estimator1, estimator2): e1_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator1.scope)] e1_params = sorted(e1_params, key=lambda v: v.name) e2_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator2.scope)] e2_params = sorte...
[ "def copy_model_parameters(sess, estimator1, estimator2):\n start = time.time()\n e1_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator1.scope)]\n e1_params = sorted(e1_params, key=lambda v: v.name)\n e2_params = [t for t in tf.trainable_variables() if t.name.startswith(estimat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the maximum sequence bounds of non-idle time. Machines show default values at the beginning and end of the operations; this function returns the ids of the longest sequence that is not operating with the default values. Note that you cannot just remove all default values, essentially because order matters and the...
def get_id_bounds( values: Tensor, default_value: float ): # get all values that are not default ones default_value_idx = (values == default_value).nonzero()[:, 0] # get the longest sequence without interruption # to do this, get the difference of the above ids diff = default_value_idx[1:] -...
[ "def seq_maxpool(x):\n seq,mask=x\n seq-=(1-mask)*1e10\n return K.max(seq,1)", "def seq_maxpool(x):\n seq, mask = x\n seq -= (1 - mask) * 1e10\n return K.max(seq, 1)", "def find_max_gap(self, free_space_ranges):\n # mask the bubble\n masked = np.ma.masked_where(free_space_ranges=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a list of strings indicating available devices to test on. Checks for CUDA devices, primarily. Assumes CPU is always available.
def get_test_devices(): # Assumption: CPU is always available devices = ['cpu'] if torch.cuda.is_available(): devices.append('cuda') return devices
[ "def get_test_devices():\n devices = [\"cpu\"]\n if torch.cuda.is_available():\n devices.append(\"cuda\")\n return devices", "def get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the pickled spaCy objects.
def read_spacy_pickle(self, file_path): vocab = self.nlp.vocab try: file = open(file_path, "rb") # putting the spacy doc in a single-item list to avoid pandas splitting it up spacy_objects = [[Doc(vocab).from_bytes(x)] for x in pickle.load(file)] file.cl...
[ "def spacy_model_loader(path):\n with open(path, \"rb\") as f:\n model = pickle.load(f)\n nlp = spacy.blank(model[\"lang\"])\n for pipe_name in model[\"pipeline\"]:\n pipe = nlp.create_pipe(pipe_name)\n nlp.add_pipe(pipe)\n nlp.from_bytes(model[\"bytes_data\"])\n\n return nlp", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }