Dataset columns: query (string, 9 – 9.05k chars) · document (string, 10 – 222k chars) · negatives (list, 19 – 20 items) · metadata (dict)
Returns a dictionary mapping models to lists of reports. Only reports that the user is allowed to access are returned.
def get_reports_by_model(user):
    reports_by_model = {}
    for report in _registry.values():
        if report.check_permission(user):
            reports_for_model = reports_by_model.setdefault(report.model, [])
            reports_for_model.append(report)
    return reports_by_model
[ "def GetAnalysisReports(self):\n return self._store.GetAnalysisReports()", "def resourceReports(self):\n self.resourceReportList = {}\n for k, v in self.activeResources.iteritems():\n self.resourceReportList[k] = v.report()\n for k, v in self.inactiveResources.iteritems():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a report exists.
def report_exists(report_id):
    return report_id in _registry
[ "def report_exists(account_id: str, profile_id: str, report_name: str) -> object:\n service = _get_service()\n request = service.reports().list(profileId=profile_id)\n response = request.execute()\n if logging.get_verbosity() == 1: # Debug.\n pp = pprint.PrettyPrinter(indent=2)\n logging.debug(pp.pformat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a report instance by its ID. If the user does not have the correct permission for the report, PermissionDenied is raised.
def get_report_by_id(report_id, user):
    report = _registry[report_id]
    if not report.check_permission(user):
        raise PermissionDenied
    return report
[ "def get_report_instance(report_id):\n for cls in Report.__subclasses__():\n if cls.get_report_id() == report_id:\n return cls()\n return None", "def get(self, crash_report_id):\n report = db.getCrashReport(crash_report_id)\n if report:\n return report\n els...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A fast implementation of the forward pass for a convolutional layer based on im2col and col2im.
def conv_forward_im2col(x, w, b, conv_param):
    N, C, H, W = x.shape
    num_filters, _, filter_height, filter_width = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']

    # Check dimensions
    assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
    assert (H + 2 * pad - filter...
[ "def forward_convolution(conv_W, conv_b, data):\n\n conv_channels, _, conv_width, conv_height = conv_W.shape\n\n input_channels, input_width, input_height = data.shape\n\n output = np.zeros((conv_channels, input_width - conv_width + 1, input_height - conv_height + 1))\n\n for x in range(input_width - co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A fast implementation of the backward pass for a convolutional layer based on im2col and col2im.
def conv_backward_im2col(dout, cache):
    x, w, b, conv_param, x_cols = cache
    stride, pad = conv_param['stride'], conv_param['pad']

    db = np.sum(dout, axis=(0, 2, 3))

    num_filters, _, filter_height, filter_width = w.shape
    dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
    dw = dout...
[ "def conv_backward_naive(dout, cache):\n #############################################################################\n # TODO: Implement the convolutional backward pass. #\n #############################################################################\n x, w, b, conv_param = cache\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a mock, shared Driftwood object
def driftwood():
    d = mock.Mock()
    d.config = {
        'database': {
            'root': 'db.test',
            'name': 'test.db'
        }
    }
    d.log.msg.side_effect = Exception('log.msg called')
    return d
[ "def mock(self):\r\n return self._mock", "def __init__(self, mock):\n\n self.mock = mock\n self.response = None", "def get_deployment_mock():", "def _MockInsideChroot(self):\n mic = self.mox.CreateMock(cgt.InsideChroot)\n\n mic.creds = self.mox.CreateMock(gdata_lib.Creds)\n mic.gd_cl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DatabaseManager should create the directory db.test if it doesn't exist already.
def test_create_db_dir_if_not_exist(self):
    databasemanager.DatabaseManager(driftwood())
[ "def test_create_db_file_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())", "def test_database(self):\n tester = os.path.exists(\"lingualizer_alchemy.db\")\n self.assertEqual(tester, True)", "def init_db(self):\n\n # is there a DB already?\n if os.path.exists...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DatabaseManager should create the file test.db if it doesn't exist already.
def test_create_db_file_if_not_exist(self):
    databasemanager.DatabaseManager(driftwood())
[ "def test_create_db_dir_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())", "def test_database(self):\n tester = os.path.exists(\"lingualizer_alchemy.db\")\n self.assertEqual(tester, True)", "def init_db(self):\n\n # is there a DB already?\n if os.path.exists(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return data for the given day. Assumes data files are in ../data/
def get_data(day_num: int) -> Generator[str, None, None]:
    data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'data')
    with open(os.path.join(data_dir, f'day_{day_num}.txt'), 'r') as fobj:
        yield from fobj
[ "def data(ignore_date=False):", "def get_day_predictions(day):\n ldir = glob.glob(status_path + day + '/*-dadestram.data')\n if not ldir:\n raise Exception('Day does not exists')\n ldata = []\n for f in sorted(ldir):\n ldata.append(DataTram(f))\n return ldata", "def get_data_files(f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split a line by tabs and convert each element
def tab_split(line: str, converter: Callable[[str], Any] = str) -> List[Any]:
    return [converter(x) for x in line.split('\t')]
[ "def __convert_tabs_to_spaces(self, spaces, tabs, line):\n\n line = line.replace('\\t', ' ')\n spaces += tabs * 4\n return spaces, line", "def read_ptsv(line):\n return map(read_ptsv_element, line.rstrip().split('\\t'))", "def line_to_tokens(line):\n if len(line) < 1:\n yield ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a variable in the Plan. This method is used to create the variables that are needed in the Plan in order to add an entry for the outbound connection pool for the new data source.
def makeDeploymentPlanVariable(wlstPlan, name, value, xpath, origin='planbased'):
    try:
        variableAssignment = wlstPlan.createVariableAssignment(name, moduleOverrideName, moduleDescriptorName)
        variableAssignment.setXpath(xpath)
        variableAssignment.setOrigin(origin)
        wlstPlan.createVariable(name, value...
[ "def _create_variables(self) -> None:\n if self.relaxed:\n kind = LpContinuous\n else:\n kind = LpInteger\n\n # List all combinations of apps and instances and workloads\n comb_res = cartesian_product(self.system.apps, self.cooked.instances_res)\n comb_dem = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To make sure city and country names like 'London, UK' work.
def test_city_country_names(self):
    city = formatted_city_country('london', 'united kingdom')
    self.assertEqual(city, 'London, United Kingdom')
[ "def test_city_country(self):\n formatted_city_country = city_country('santiago', 'chile')\n self.assertEqual(formatted_city_country, 'Santiago, Chile')", "def test_city_country_name_and_population(self):\r\n city_information = formatted_city_country('london', 'united kingdom', 8900000)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To make sure city information in the form 'London, United Kingdom - Population 8900000' works.
def test_city_country_name_and_population(self):
    city_information = formatted_city_country('london', 'united kingdom', 8900000)
    self.assertEqual(city_information, 'London, United Kingdom - Population 8900000')
[ "def test_city_country_names(self):\r\n city = formatted_city_country('london', 'united kingdom')\r\n self.assertEqual(city, 'London, United Kingdom')", "def test_city_country(self):\n formatted_city_country = city_country('santiago', 'chile')\n self.assertEqual(formatted_city_country,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unit test for 'predict(...)'.
def test_predict(self):
    assert 2 == 2
[ "def test_predict():\n\t\n\t# Create a row of data and run prediction.\n\thome = 'Arsenal'\n\taway = 'Chelsea'\n\tstats = pd.read_sql_query(\"select * from stats;\", engine)\n\tmodel = joblib.load('./model.pkl')\n\tresult = prediction.prediction(home, away, stats, model)\n\n\t# Check type of output.\n\tassert isins...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unit test for 'reverse_encode(...)'.
def test_reverse_encode(self):
    reverse = self.test_wbn.reverse_encode([0, 1])
    assert isinstance(reverse, list)
[ "def test_decode():\n assert one.decode(one.encode(\"Hello World\")) == \"hello world\"", "def test_encode():\n assert one.encode(\n \"Hello World\") == \"11481249678067805698 10695698668367809533\"", "def test_encode():\n\n assert ceaser.encode(\"bbb\", 3) == \"eee\"\n\n assert ceaser.encode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Hook into nosetests or other unittest based frameworks. The hook will throw exceptions such that a debugger like PyCharm can inspect them easily. This will only be done if there is just a single test case. This code might be a bit experimental. It should work though. But if it does not, we can also skip this. Currently...
def _try_hook_into_tests():
    # Check if this is run inside a debugger. Skip if this is not the case.
    import sys
    get_trace = getattr(sys, "gettrace", None)
    if not get_trace:
        return
    if get_trace() is None:
        return
    # get TestProgram instance from stack...
    from unittest import TestProgram
    from returnn...
[ "def fail (self):\n \n import leoGlobals as g\n \n g.app.unitTestDict[\"fail\"] = g.callerName(2)", "def run_tests(self):\n import pytest\n\n errno = pytest.main([])\n sys.exit(errno)", "def skip_this_extension_module():\n if not run_end_to_end:\n raise unittes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
General function for creating an expression for a smooth minimum or maximum. Uses the smooth_abs operator.
def smooth_minmax(a, b, eps=1e-4, sense='max'):
    # Check type of eps
    if not (isinstance(eps, (float, int, Param))):
        raise TypeError("Smooth {} eps argument must be a float, int or "
                        "Pyomo Param".format(sense))

    # Set sense of expression
    if sense == 'max':
        mm = 1
[ "def smooth_minmax(a, b, eps=1e-4, sense=\"max\"):\n # Check type of eps\n if not isinstance(eps, (float, int, Param)):\n raise TypeError(\n \"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense)\n )\n\n # Set sense of expression\n if sens...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Smooth maximum operator, using smooth_abs operator.
def smooth_max(a, b, eps=1e-4):
    expr = smooth_minmax(a, b, eps, sense='max')
    return expr
[ "def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"max\")\n return expr", "def max_abs(\n self,\n a,\n axis=None,\n keepdims=False,\n mtol=None,\n split_every=None,\n chunk_function=None,\n ):\n return self.max(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Smooth minimum operator, using smooth_abs operator.
def smooth_min(a, b, eps=1e-4):
    expr = smooth_minmax(a, b, eps, sense='min')
    return expr
[ "def smooth_min(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"min\")\n return expr", "def min_abs(\n self,\n a,\n axis=None,\n keepdims=False,\n mtol=None,\n split_every=None,\n chunk_function=None,\n ):\n return self.min(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the log of max(a, eps) using the smooth_max expression. This avoids transient evaluation errors when changing a model from one state to another, and is appropriate when, at the solution, a >> eps.
def safe_log(a, eps=1e-4):
    return log(smooth_max(a, eps, eps=eps))
[ "def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"max\")\n return expr", "def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense='max')\n return expr", "def apply_ada_max(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon, target=utils.CCE):\n\n _che...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the hash portion using base object method, but with no network_id included.
def test_users_hash_no_network_id(self):
    test_hash = SAMPLE_USER_HASH.copy()
    test_hash.pop('network_id')
    self.base_test_hash(test_hash)
[ "def test_hash_id(self):\n self.assertEqual(hash_id(self.id1, self.id2, self.salt, self.length), \"2Y7W5d\")", "def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))", "def test_get_xrp__ripple_block_details_by_block_h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Vocab with extra tokens prepended.
def add_extra_vocab(self, extra_vocab):
    extra_tok_counts = [(w, float('inf')) for w in extra_vocab]
    return Vocab(extra_tok_counts + self.tok_counts, unk_tok=self.unk_tok)
[ "def get_vocab(self) -> torchtext.vocab.Vocab:\n if self.vocab is not None:\n return self.vocab\n else:\n tok_to_idx = list(self.vectorizer.vocabulary_.items())\n tok_to_idx.sort(key = lambda x: x[1])\n ordered_vocab = [ (k,1) for (k,_) in tok_to_idx ]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Vocab containing the top `size` tokens.
def truncate(self, size):
    return Vocab(self.tok_counts[:size], unk_tok=self.unk_tok)
[ "def limitVocab(self, max_size):\n if self.VOCAB_SIZE <= max_size:\n print(f'Current vocab size is {self.VOCAB_SIZE}, no need to decrease size')\n return\n# self.word2index = {}\n# # self.word2count = {}\n# self.index2word = {}\n self.VOCAB_SIZE = max_size...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a password hash.
def generate_password_hash(self, password):
    hash = generate_password_hash(password)
    return hash
[ "def gen_hash_password(password):\n import random\n letters = 'abcdefghijklmnopqrstuvwxyz0123456789'\n p = ''\n random.seed()\n for x in range(32):\n p += letters[random.randint(0, len(letters)-1)]\n return p", "def hash_password(self, password):\n password = hashpw(password.encode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a runnable test suite from given datasources and settings.
def TestSuite(datasources, settings):
    datasources = [utils.abspath(path) for path in datasources]
    suite = _get_suite(datasources, settings['SuiteNames'], settings['WarnOnSkipped'])
    suite.set_options(settings)
    _check_suite_contains_tests(suite, settings['RunEmptySuite'])
    return suite
[ "def construct_test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(FRAMConnection))\n suite.addTest(unittest.makeSuite(FRAMActions))\n return suite", "def _create_test_suite(test_cases: [unittest.TestCase]) -> unittest.TestSuite:\n suite = unittest.TestSuite()\n\n # A...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if you create a card with rank 12, its rank_name will be "Queen"
def test_1_queen(self):
    card = cards.Card(0, 12)
    self.assertEqual(card.rank_name, "Queen")
[ "def testRankNames(self):\n RN = ['Two', 'Three', 'Four', 'Five', 'Six',\n 'Seven', 'Eight', 'Nine', 'Ten', \n 'Jack', 'Queen', 'King', 'Ace']\n s = \"c\" #testing rank not suit\n for r in range(2,14):\n myCard = Card(r,s)\n self.assertEqual(myCard.rankName(),RN[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if you create a card instance with suit 1, its suit_name will be "Clubs"
def test_2_club(self):
    card = cards.Card(1, 2)
    self.assertEqual(card.suit_name, "Clubs")
[ "def test_1_queen(self):\n card = cards.Card(0, 12)\n self.assertEqual(card.rank_name, \"Queen\")", "def test_Construction(self):\n #Can make jokers (suit None)\n test_card = Card(0, None)\n #Any number given for a joker is set to 0\n test_card = Card(9999, None)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if you invoke the deal_card method on a deck, it will return a card instance.
def test_5_deal_card_return(self):
    deck = cards.Deck()
    card = cards.Card(3, 13).__str__()
    dealt = deck.deal_card(i=-1).__str__()
    self.assertEqual(dealt, card)
[ "def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)", "def test_deal(deck):\n # get a copy of the top card\n # make sure deal delivers top card of deck\n ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if you invoke the deal_card method on a deck, the deck has one fewer card in it afterwards.
def test_6_deal_card_fewer(self):
    deck = cards.Deck()
    original_card = len(deck.cards)
    deck.deal_card()
    dealt_card = len(deck.cards)
    self.assertGreater(original_card, dealt_card)
[ "def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(5)\n self.assertEqual(len(cards)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if you invoke the replace_card method, the deck has one more card in it afterwards. (Use deal_card function first to remove a card from the deck and then add the same card back in)
def test_7_replace_card_more(self):
    deck = cards.Deck()
    removed = deck.deal_card()
    removed_list = len(deck.cards)
    deck.replace_card(removed)
    replaced_list = len(deck.cards)
    self.assertGreater(replaced_list, removed_list)
[ "def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)", "def test_deal(deck):\n # get a copy of the top card\n # make sure deal delivers top card of deck\n ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns X, y for all images in img_paths (a list of full paths to jpg images). Same parameters as utils.img_to_dataset().
def imgs_to_dataset(img_paths, window_size=10, squeeze=True, resize=100, padding=10):
    X = list()
    y = list()
    for img_path in img_paths:
        img = Image(img_path, resize=resize, padding=padding)
        X_, y_ = img_to_dataset(img, window_size=window_size, squeeze=squeeze)
        X.ext...
[ "def extract_images(paths):\n images = []\n for path in paths:\n ds = cv2.imread(path)\n ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)\n images.append(ds)\n return images", "def gather_images(datasets, batch_img_paths):\r\n n_batch = len(batch_img_paths)\r\n\r\n images = [[] for d ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get 0-255 pixel values for an input vector. Returns a uint8 vector.
def to_255_scale(vector):
    return np.round(vector * 255).astype('uint8')
[ "def vectorizar(self):\n img = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)\n self.vector = img.T.flatten().T\n return None", "def getValue (self, row, column):\n value = 0\n try:\n value = __image__ [row, column]\n if value > 255 or value < 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds padding to the image. Modifies .data.
def add_padding(self, padding=10, color=0):
    self.data = cv2.copyMakeBorder(self.data, padding, padding, padding, padding,
                                   cv2.BORDER_CONSTANT, value=color)
    return self.data
[ "def AddPadding(data: str) -> str:\n last_block_width = len(data) % 8\n if last_block_width != 0:\n data += (8 - last_block_width) * Base32Const.PADDING_CHAR\n return data", "def _pad_img(self, results):\n img = results[\"img\"]\n if self.size is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns gray values for a window around the target pixel at location i, j. The window_size will be rounded, e.g. a window size of 11 -> 5 pixels to the left, right, up and down of the target. Of the window, gray pixel values are returned. The target location's rgb values are returned. The blue, green, and red values represent the res...
def get_dataset_for_pixel(self, i, j, window_size=10, squeeze=True):
    zeta = int((window_size - 1) / 2)
    # The BGR values represent the target features, y
    b, g, r = self.data[i, j]
    # Gray represents the predictive features, X
    gr = self.gray[i - zeta : i + zeta + 1, ...
[ "def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], \n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n x_start_stop[0] = x_start_stop[0] or 0\n x_start_stop[1] = x_start_stop[1] or img.shape[1]\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A lineage between source and target entities.
def __init__(__self__, *,
             source: 'outputs.GoogleCloudDatacatalogLineageV1EntityReferenceResponse',
             target: 'outputs.GoogleCloudDatacatalogLineageV1EntityReferenceResponse'):
    pulumi.set(__self__, "source", source)
    pulumi.set(__self__, "target", target)
[ "def target_lines(self):\n target_lines = self._target_source_lines[:]\n deps_begin, deps_end = self._dependencies_interval\n target_lines[deps_begin:deps_end] = self.dependency_lines()\n if self._provides:\n provides_begin, provides_end = self._provides_interval\n target_lines[provides_begin:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Type of the source. Use of a source_type other than `CUSTOM` for process creation or updating is highly discouraged, and may be restricted in the future without notice.
def source_type(self) -> str:
    return pulumi.get(self, "source_type")
[ "def get_type(self, source: Source):\n return source.type_class", "def source_type(self) -> str:\r\n return SOURCE_TYPE_GPS", "def SetSourceType(self, source_type):\n if self.source_type is None:\n self.source_type = source_type", "def get_source_type(source: str) -> str:\n parsed = u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``put_referenced_message_into`` works as intended.
def test__put_referenced_message_into():
    message_id_0 = 202305010029
    channel_id_0 = 202305010030
    guild_id_0 = 202305010031
    content_0 = 'Rot in hell'

    message_id_1 = 202305010032
    channel_id_1 = 202305010033
    guild_id_1 = 202305010034
    content_1 = 'Afraid'

    message_0 = Message.pr...
[ "def is_referenced(target):", "def test_publish_message(self):\n pass", "def test_fk_ref_preservation(self):\n self.login()\n article = TestArticle(\n slug = 'article',\n title = 'Title',\n status = PUBLISHED_STATE\n )\n article.save()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will navigate to the home screen of the device
def navigate_home_mobile(self):
    if config.desiredCapabilities_mobile['platformName'] == 'android':
        self.mob_conn.press_keycode(3)  # KEYCODE_HOME (187 is KEYCODE_APP_SWITCH, used by open_recent_apps)
    elif config.desiredCapabilities_mobile['platformName'] == 'ios':
        pressHome = {"name": "home"}
        # self.mob_conn.execute_script("mobile: p...
[ "def click_home(self):\n self.find_element_by_xpath(self.home_xpath).click()", "def set_home(self):\n print(\"Setting home position.\")\n self._command('2H HERE X Y')", "def go_home(self):\n if self.home_url is not None:\n self.set_url(self.home_url)", "def _returnhome(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will go to the previous screen in the mobile device
def go_back_mobile(self):
    if config.desiredCapabilities_mobile['platformName'] == 'android':
        self.mob_conn.press_keycode(4)  # KEYCODE_BACK
    elif config.desiredCapabilities_mobile['platformName'] == 'ios':
        self.mob_conn.back()
    return self
[ "def go_previous_page(self):\n\n self.webView.back()", "def go_back():\n pyautogui.moveTo(100, 200)\n pyautogui.click()", "def to_prev_screen(self) -> None:\n if self.game_mode == 'comp' and self.num_players == 2:\n self.reset_num_screen()\n self.parent.current = 'menu'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will change the orientation of the screen to LandscapeView
def change_orientation_landscape(self):
    self.mob_conn.orientation = 'LANDSCAPE'
    return self
[ "def to_landscape(self) -> None:\n if self.is_portrait:\n self.width, self.height = self.height, self.width", "def change_orientation_portrait(self):\n\n self.mob_conn.orientation = 'PORTRAIT'\n return self", "def to_portrait(self) -> None:\n if self.is_landscape:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will change the orientation of the screen to PortraitView
def change_orientation_portrait(self):
    self.mob_conn.orientation = 'PORTRAIT'
    return self
[ "def change_orientation_landscape(self):\n\n self.mob_conn.orientation = 'LANDSCAPE'\n return self", "def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width", "def to_landscape(self) -> None:\n if self.is_portrait:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will open the menu/app_drawer of the device (only for Android)
def open_menu_mobile(self):
    if config.desiredCapabilities_mobile['platformName'] == 'android':
        self.mob_conn.press_keycode(82)  # KEYCODE_MENU
    # elif config.desiredCapabilities_mobile['platformName'] == 'ios':
    #     pressBack = {"name": "back"}
    #     self.mob_conn.execute_script("mobile: pr...
[ "def openMenu(self):\n root = tk.Tk()\n menu = Menu(self, master=root)\n menu.mainloop()", "def open_admin_side_menu(self):\n self.click_on_element_by_css(adpl.ADMIN_SIDE_NAVIGATION_MENU)", "def menu(self):\n self.parent.switch_screen(\"Menu\")", "def setMenuMode(string):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Hold the element for a duration of time
def touch_and_hold_element(self, element, time_duration=3000):
    actions = TouchAction(self.mob_conn)
    actions.long_press(element, duration=time_duration)
    actions.perform()
    return self
[ "def at_repeat(self):\r\n self.obj.blink()", "def hold_piece(self):\r\n if self.pieces[2]:\r\n self.pieces[0], self.pieces[2] = self.pieces[2], self.pieces[0]\r\n else:\r\n self.pieces[:3] = [self.pieces[1], Piece(), self.pieces[0]]\r\n\r\n self.pieces[0].reset()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will open background apps section
def open_recent_apps(self):
    if config.desiredCapabilities_mobile['platformName'] == 'android':
        self.mob_conn.press_keycode(187)  # KEYCODE_APP_SWITCH
    # elif config.desiredCapabilities_mobile['platformName'] == 'ios':
    #     params = {"element": element, "name": "back"}
    #     self.mob_conn.execute_...
[ "def the_apps_page_load_open_installed_applications(driver):\n if is_element_present(driver, '//mat-ink-bar[@style=\"visibility: visible; left: 0px; width: 183px;\"]') is False:\n assert wait_on_element(driver, 10, '//div[contains(text(),\"Installed Applications\")]', 'clickable')\n driver.find_ele...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will validate whether the app is installed on the mobile device or not
def validate_app_installed(self, appPackage):
    assert self.mob_conn.is_app_installed(
        appPackage), f"The Application {appPackage} is not installed in the device."
    return self
[ "def on_mobile(self):\n ua = get_user_agent(self.request)\n if ua:\n if detect_mobile_browser(ua):\n return True\n else:\n return False\n return False", "def check_devices(self) -> bool:\n\t\tpass", "def _has_widevine(self):\n if se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will start a new activity on the mobile device
def start_new_activity_android(self, appPackage, activityName):
    self.mob_conn.start_activity(appPackage, activityName)
    return self
[ "def open_menu_mobile(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(82)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # pressBack = {\"name\": \"back\"}\n # self.mob_conn.execute_scr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the appActivity and appPackage of the current running application
def current_app_info(self):
    app_info = {}
    app_activity = self.mob_conn.current_activity
    app_package = self.mob_conn.current_package
    app_info['current_activity'] = app_activity
    app_info['current_package'] = app_package
    return app_info
[ "def main_activity(self):\n MAIN_ACTIVITY_ACTION = \"android.intent.action.MAIN\"\n\n package = self.package()\n\n for activity in self.activities():\n for intent_filter in activity[\"intent_filters\"]:\n if MAIN_ACTIVITY_ACTION in intent_filter[\"actions\"]:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will send the app to background for a specific amount of time
def send_app_to_background(self, background_time=100):
    self.mob_conn.background_app(background_time)
    return self
[ "def background_app(self, duration):\n self._selenium_web_driver().background_app(duration / 1000.0)", "def do_something_every_hour():\n sleep(5)", "def catch_alarm():\n comm_time_to_call_heart_beat = True", "def worker_function(time_left):\r\n timer = TimerApp(time_left)", "async def join_tim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the given app status on the device
def get_app_status(self, appPackage):
    state = self.mob_conn.query_app_state(appPackage)
    if state == 0:
        return "App not installed"
    elif state == 1:
        return "App not running"
    elif state == 2:
        return "App running in background or suspended"
    elif s...
[ "def status_get(self, *, is_app=False):\n return self._run('status-get', '--include-data', f'--application={is_app}')", "def status_get(self, *, is_app=False):\n return self._run('status-get', '--include-data', '--application={}'.format(is_app))", "def getStatus(self):\n exitcode, output = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flick starting at on_element, and moving by the x and y with specified speed.
def flick_mobile_element(self, element, x_coordinate, y_coordinate, speed):
    self.mob_conn.flick_element(element, x_coordinate, y_coordinate, speed)
    return self
[ "def update_position(self):\n \t\t\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed", "def accelerate(self):\r\n self.__x_speed += math.cos(math.radians(self.__direction))\r\n self.__y_speed += math.sin(math.radians(self.__direction))", "def a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the text from the clipboard of the system
def get_text_from_clipboard(self):
    # the bare get_clipboard() call in the original discarded its result
    text_from_clipboard = self.mob_conn.get_clipboard_text()
    return text_from_clipboard
[ "def tkinter_clipboard_get():\n try:\n from tkinter import Tk, TclError\n except ImportError:\n raise TryNext(\n \"Getting text from the _clipboard on this platform requires tkinter.\"\n )\n\n root = Tk()\n root.withdraw()\n try:\n text = root.clipboard_get()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the capabilities of the specified session
def get_session_capabilities(self):
    return self.mob_conn.session
[ "def get_capabilities(connection):\n for capability in connection.server_capabilities:\n six.print_(capability)", "def capabilities(self):\n return []", "def getCapabilities(self):\n \n # initialise the request\n resp = CapabilityConfigList()\n \n # make t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Acquire a resource on the semaphore, or else quit after cancellation.
def acquire(self):
    # print('{}: Getting ready...'.format(self.name))
    with self.ready:
        while not self.cancelled:
            # print('{}: Trying to acquire...'.format(self.name))
            if self.semaphore.acquire(blocking=False):
                # print('{}: Acquired!'.format(...
[ "def _cancelAcquire(self: _DeferredLockT, d: Deferred[_DeferredLockT]) -> None:\n self.waiting.remove(d)", "def resource_acquiring_iteration(acquired, released, barrier):\n acquired.set()\n try:\n yield 1\n barrier.wait(timeout=TIMEOUT)\n yield 2\n finally:\n released.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute Unweighted UniFrac using fp64 math and write to file
def unweighted_fp64_to_file(table: str,
                            phylogeny: str,
                            out_filename: str,
                            pcoa_dims: int = 10,
                            threads: int = 1,
                            variance_adjusted: bool = False,
                            bypas...
[ "def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute Unweighted UniFrac using fp32 math and write to file
def unweighted_fp32_to_file(table: str,
                            phylogeny: str,
                            out_filename: str,
                            pcoa_dims: int = 10,
                            threads: int = 1,
                            variance_adjusted: bool = False,
                            bypas...
[ "def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute weighted normalized UniFrac using fp64 math and write to file
def weighted_normalized_fp64_to_file(table: str,
                                     phylogeny: str,
                                     out_filename: str,
                                     pcoa_dims: int = 10,
                                     threads: int = 1,
                                     variance_adj...
[ "def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute weighted normalized UniFrac using fp32 math and write to file
def weighted_normalized_fp32_to_file(table: str,
                                     phylogeny: str,
                                     out_filename: str,
                                     pcoa_dims: int = 10,
                                     threads: int = 1,
                                     variance_adj...
[ "def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute weighted unnormalized UniFrac using fp64 math and write to file
def weighted_unnormalized_fp64_to_file(table: str,
                                       phylogeny: str,
                                       out_filename: str,
                                       pcoa_dims: int = 10,
                                       threads: int = 1,
                                       ...
[ "def weighted_normalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute weighted unnormalized UniFrac using fp32 math and write to file
def weighted_unnormalized_fp32_to_file(table: str,
                                       phylogeny: str,
                                       out_filename: str,
                                       pcoa_dims: int = 10,
                                       threads: int = 1,
                                       ...
[ "def weighted_normalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a UniFrac distance matrix from an hdf5 file
def h5unifrac(h5file: str) -> skbio.DistanceMatrix:
    with h5py.File(h5file, "r") as f_u:
        if 'matrix:0' in f_u.keys():
            # multi format
            dm = skbio.DistanceMatrix(
                f_u['matrix:0'][:, :],
                [c.decode('ascii') for c in f_u['order'][:]])
        else:
            ...
[ "def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a PCoA from an hdf5 file
def h5pcoa(h5file: str) -> skbio.OrdinationResults:
    with h5py.File(h5file, "r") as f_u:
        pcoa_method = f_u['pcoa_method'][0].decode('ascii')
        if 'FSVD' == pcoa_method:
            long_method_name = "Approximate Principal Coordinate Analysis" + \
                               " using FSVD"
            e...
[ "def h5pcoa_all(h5file: str) -> tuple:\n\n with h5py.File(h5file, \"r\") as f_u:\n pcoa_method = f_u['pcoa_method'][0].decode('ascii')\n if 'FSVD' == pcoa_method:\n long_method_name = \"Approximate Principal Coordinate Analysis\" + \\\n \" using FSVD\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read all PCoAs from an hdf5 file
def h5pcoa_all(h5file: str) -> tuple:
    with h5py.File(h5file, "r") as f_u:
        pcoa_method = f_u['pcoa_method'][0].decode('ascii')
        if 'FSVD' == pcoa_method:
            long_method_name = "Approximate Principal Coordinate Analysis" + \
                               " using FSVD"
        else:
            ...
[ "def h5pcoa(h5file: str) -> skbio.OrdinationResults:\n\n with h5py.File(h5file, \"r\") as f_u:\n pcoa_method = f_u['pcoa_method'][0].decode('ascii')\n if 'FSVD' == pcoa_method:\n long_method_name = \"Approximate Principal Coordinate Analysis\" + \\\n \" usin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the first PERMANOVA statistical test from an hdf5 file. As described in scikit-bio's skbio.stats.distance.permanova, Permutational Multivariate Analysis of Variance (PERMANOVA) is a nonparametric method that tests whether two or more groups of objects are significantly different based on a categorical factor.
def h5permanova(h5file: str) -> pd.Series:
    found = False
    with h5py.File(h5file, "r") as f_u:
        methods = f_u['stat_methods'][:]
        test_names = f_u['stat_test_names'][:]
        values = f_u['stat_values'][:]
        pvalues = f_u['stat_pvalues'][:]
        n_permutations = f_u['stat_n_permutations'...
[ "def h5permanova_dict(h5file: str) -> dict:\n\n pmns = {}\n with h5py.File(h5file, \"r\") as f_u:\n methods = f_u['stat_methods'][:]\n test_names = f_u['stat_test_names'][:]\n grouping_names = f_u['stat_grouping_names'][:]\n values = f_u['stat_values'][:]\n pvalues = f_u['st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read PERMANOVA statistical tests from an hdf5 file. As described in scikit-bio's skbio.stats.distance.permanova, Permutational Multivariate Analysis of Variance (PERMANOVA) is a nonparametric method that tests whether two or more groups of objects are significantly different based on a categorical factor.
def h5permanova_dict(h5file: str) -> dict:
    pmns = {}
    with h5py.File(h5file, "r") as f_u:
        methods = f_u['stat_methods'][:]
        test_names = f_u['stat_test_names'][:]
        grouping_names = f_u['stat_grouping_names'][:]
        values = f_u['stat_values'][:]
        pvalues = f_u['stat_pvalues'][:]...
[ "def h5permanova(h5file: str) -> pd.Series:\n\n found = False\n with h5py.File(h5file, \"r\") as f_u:\n methods = f_u['stat_methods'][:]\n test_names = f_u['stat_test_names'][:]\n values = f_u['stat_values'][:]\n pvalues = f_u['stat_pvalues'][:]\n n_permutations = f_u['stat_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a string which denotes an Eulerian cycle within the graph
def eulerian_cycle(graph, random_start=True, choice_start=0):
    if len(graph) == 0 or len(graph) == 1:
        return ""
    stack, vertices, visited_edges = [], [], []
    # deciding where to start by generating a random start location, if random_start is set to True;
    # at the same time, check if there are any...
[ "def euler26(d):\n max_cycle = 0\n max_cycle_denominator = 0\n\n for i in range(1, d):\n length = cycle_length(i)\n if length > max_cycle:\n max_cycle = length\n max_cycle_denominator = i\n\n return \"1/{0} has the max cycle, with a length of {1}\".format(max_cycle_de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a count of the number of edges/outputs that are leaving the vertex
def num_of_outputs(vertex, graph):
    if vertex in graph:
        # simply count
        return len(graph[vertex])
    return 0
[ "def out_edge_count(self):", "def number_of_edges(self) -> int:\n count = 0\n for vertice in self.__graph:\n count += len(self.__graph[vertice])\n return count // 2", "def number_of_interface_vertices(self):\n return sum(len(attr['interface_points']) for u, v, attr in self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator for views that catches the ObjectDoesNotExist exception. If redirect is None, raises Http404; otherwise redirects.
def object_does_not_exist(view_func=None, redirect=None):
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            try:
                return view_func(request, *args, **kwargs)
            except ObjectDoesNotExist:
                if redirect:
                    ...
[ "def redirect_error_handler(redirect_path: str, exception: Exception, **kwargs) -> RedirectResponse:\n return RedirectResponse(urls.with_query_params(redirect_path, error=exception, **kwargs))", "def handle404(request):\n\n if request.path.endswith('/'):\n fixed_path = request.path[:-1]\n quer...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a csv file from PECO into a pandas dataframe
def read_PECO_csv(datafile):
    if hasattr(datafile, 'read'):
        # Read buffer directly
        df = pandas.read_csv(datafile, skiprows=4)
    else:
        # Read in usage log (csv format, probably specific to PECO)
        df = pandas.read_csv(root + datafile + '.csv', skiprows=4)
    # Convert costs (...
[ "def load_csv():\n\ttry:\n\t\tdf = pd.read_csv(DATASET_CSV_PATH)\n\texcept:\n\t\tprint('Error reading %s. Make sure file exists or try to regenerate it using generate_csv() method.')\n\t\tdf = pd.DataFrame()\n\n\treturn df", "def load_data(path):\n\n df = pd.read_csv(path)\n return df", "def load_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read an xml file in GB (Green Button) format
def read_GB_xml(datafile):
    from BeautifulSoup import BeautifulStoneSoup
    if hasattr(datafile, 'read'):
        # Read buffer directly
        soup = BeautifulStoneSoup(datafile.read())
    else:
        # Read in usage log (csv format, probably specific to PECO)
        with open(datafile) as f:
            ...
[ "def read_xml_file(self, xml_fn):\n pass", "def read_xml_file(self):\r\n\r\n #Find the root of xml tree.\r\n xml_tree = ET.parse(self.xml_file_path + \"pic{}.xml\".format(self.file_index))\r\n root = xml_tree.getroot()\r\n\r\n return root", "def read_xml(path_to_xml, verbose=T...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create density cloud of data for a given tag or group of tags
def density_cloud_by_tags(df, columns, silent=False):
    figures = []
    if columns == 'hr' or 'hr' in columns:
        raise ValueError("Columns cannot contain hr tag")

    # Create a profile for day of week
    maxY = df['USAGE'].max()
    for label, data in df.groupby(columns):
        # Find...
[ "def tag_cloud(self, steps=4, distribution=LOGARITHMIC, filters=None, min_count=None):\r\n\t\t\r\n\t\tfrom utils import calculate_cloud\r\n\t\t\r\n\t\treturn calculate_cloud(Tagging, steps, distribution)", "def density_cluster(Data,iradius, Clusters): #This function classifies data points i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a green button dataframe, price that energy at PJM pnodes
def price_at_pnodes(df, pnodes):
    for pnode in pnodes:
        # Bring in PJM prices from DataMiner
        pnode_prices = pandas.read_csv(root + 'pnode_data/%s.csv' % pnode)
        assert len(pnode_prices['PRICINGTYPE'].unique()) == 1
        assert pnode_prices['PRICINGTYPE'].unique()[0] == 'TotalLMP'
        ...
[ "def add_prices(self):\n for i in range(self.parameters[\"number_of_products\"]):\n self.product_space.nodes[i][\"price\"] = \\\n self.product_space.nodes[i][\"delta\"] / max(\n self.product_space.nodes[i][\"firms\"], 1)", "def price(self, p, g, kind='var'):\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that google email and link expiration are not in id_token for an account that is not linked
def test_google_id_token_not_linked(oauth_test_client):
    data = {"confirm": "yes"}
    oauth_test_client.authorize(data=data)
    tokens = oauth_test_client.token()
    id_token = jwt.decode(
        tokens.id_token, options={"verify_signature": False}, algorithms=["RS256"]
    )
    assert id_token["context"]["user...
[ "def test_google_id_token_linked(db_session, encoded_creds_jwt, oauth_test_client):\n user_id = encoded_creds_jwt[\"user_id\"]\n proxy_group_id = encoded_creds_jwt[\"proxy_group_id\"]\n\n original_expiration = 1000\n google_account = \"some-authed-google-account@gmail.com\"\n\n # add google account a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test google email and link expiration are in id_token for a linked account
def test_google_id_token_linked(db_session, encoded_creds_jwt, oauth_test_client):
    user_id = encoded_creds_jwt["user_id"]
    proxy_group_id = encoded_creds_jwt["proxy_group_id"]

    original_expiration = 1000
    google_account = "some-authed-google-account@gmail.com"

    # add google account and link existi...
[ "def test_google_id_token_not_linked(oauth_test_client):\n data = {\"confirm\": \"yes\"}\n oauth_test_client.authorize(data=data)\n tokens = oauth_test_client.token()\n id_token = jwt.decode(\n tokens.id_token, options={\"verify_signature\": False}, algorithms=[\"RS256\"]\n )\n assert id_to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles GET requests. Specifically handles the "/restaurants" path to print restaurant names.
def do_GET(self):
    try:
        if self.path.endswith("/restaurants"):
            self.send_response(200)
            self.send_header('Content-type', 'text/html; charset=utf-8')
            self.end_headers()
            all_restaurants = session.query(Restaurant).all()
            ou...
[ "def do_GET(self):\n path = self.path\n name = path[1:] # strip the leading slash\n \n # if the path is the name of a known pokemon, get its html string and construct the response:\n if name in self.pokemon_dictionary:\n self.send_response(http.HTTPStatus.OK)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to resolve the given absolute or relative ``path``. If it doesn't exist as is, tries to create an absolute path using the ``relative_prefix``. If that fails, tries relative/absolute versions with each of ``possible_extensions``.
def resolve_possible_paths(path, relative_prefix, possible_extensions=None, leading_underscore=False):
    possible_extensions = [''] + list(possible_extensions) if possible_extensions else ['']
    possible_paths = [path + e if os.path.isabs(path + e)
                      else os.path.join(relative_prefix, path + e)
                      ...
[ "def resolvePath(path):\n global prefix\n if os.path.isabs(path):\n return path\n return os.path.abspath(os.path.join(prefix, path))", "def _resolve_relative_path(self, path):\n if not os.path.isabs(path):\n return os.path.join(self._relpath_root, path)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert a PPI_preview in the database. The id of the PPI_preview is updated
def create_ppi_preview(self):
    ppi_id = None
    sqlObj = _PPIpreview_sql_new()
    ppi_id = sqlObj.insert_PPI(self.score_ppi_prev, self.type_ppi_prev, self.fk_couple,
                              self.fk_prot_bact, self.fk_prot_phage)
    self.id_ppi_prev = ppi_id
    return ppi_id
[ "def write_preview(self, previewmeta):\n\n if (isinstance(previewmeta['db_plate_id'], int) and \n (previewmeta['db_plate_id'] > 0)):\n plate_id = previewmeta['db_plate_id']\n else:\n plate_id = self.get_plate_id(previewmeta['plate_num'],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all PPI scores grouped in an array, given their couple id
def get_ppi_preview_scores_grouped_by_couple_id(couple_id):
    list_scores_PPI = []
    sqlObj = _PPIpreview_sql_new()
    results = sqlObj.select_all_ppi_preview_grouped_by_couple_id(couple_id)
    for element in results:
        list_scores_PPI.append(int(element[2]))
    return list_scores_P...
[ "def _get_scores(self):\n a = numpy.array([x['scores'] for x in self.results])\n return a", "def score(self, pairs):\n pass", "def get_rhos(loan_repaid_probs, scores):\n n_scores = len(scores)\n n_groups = len(loan_repaid_probs)\n rhos = np.zeros((n_groups, n_scores))\n for j, s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all PPI preview couples treated
def get_all_ppi_preview_couple():
    list_scores_PPI_fk_couple = []
    sqlObj = _PPIpreview_sql_new()
    results = sqlObj.select_all_ppi_preview_fk_couples()
    for element in results:
        list_scores_PPI_fk_couple.append(element[0])
    return list_scores_PPI_fk_couple
[ "def getPronunciations(self):\n pass", "def data_for_paragraph_selector(self): #TODO maybe, if you're bored and there is another lockdown, rename this.\n result = []\n for point in self.data:\n # supp_facts = set([fact[0] for fact in point[\"supporting_facts\"]])\n\n sup...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the max ppi score obtained in the DB
def get_max_ppi_score():
    list_scores_PPI_fk_couple = []
    sqlObj = _PPIpreview_sql_new()
    results = sqlObj.select_all_score_PPI()
    for element in results:
        list_scores_PPI_fk_couple.append(element[2])
    max_value = max(list_scores_PPI_fk_couple)
    return max_value
[ "def max_objective_score(self):\r\n return self.data.maxObjectivePlayerScore", "def personal_best(self) -> int:\n return max(self._scores)", "def get_max_score(self):\n return sum(self.maxpoints.values())", "def max_team_score(self):\r\n return self.data.maxTeamObjective", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of PPI scores given the bacterium and phage protein ids
def get_number_ppi_score_by_bact_phage_prots(fk_prot_bac, fk_prot_phage):
    sqlObj = _PPIpreview_sql_new()
    results = sqlObj.count_ppi_preview_by_ids_ppi(fk_prot_bac, fk_prot_phage)
    return results[0][0]
[ "def score(PDBfile):\n from pro_angle import find_residue\n from Bio.PDB.PDBParser import PDBParser\n from pro_length import length\n import os\n import string\n\n score = 0 #initialize \n pars = PDBParser(PERMISSIVE = 1)\n struct = pars.get_structure(PDBfile.rstrip('.pdb'), PDBfile)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a PPI_preview given the protein id
def remove_PPI_preview_by_protein_id(id_protein):
    sqlObj = _PPIpreview_sql_new()
    id_couple = sqlObj.remove_PPI_preview_by_prot_id(id_protein)
    return id_couple
[ "def del_plate(self):\n removed_plate = Plate(self._args.plate_id, plate=self.plates.pop(self._args.plate_id))\n write_file(self._args.plate_file, self.plates)\n print('Successfully removed the plate!')\n if removed_plate.wells:\n display(removed_plate)", "def remove(self, (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace the image's pixels in a specific area
def replace(self, img, dst_clr):
    for i in range(80, 340):        # x1 x2
        for j in range(500, 800):   # y1 y2
            img[j][i] = dst_clr
    return img
[ "def replace_fast(self, img, dst_clr):\n img[535:750, :290, :] = dst_clr #h(y) w(x) c\n img[575:705, 900:, :] = dst_clr\n return img", "def rescaled_image():", "def replace(im,colo1,colo2):\n try:\n (x0,y0,x1,y1) = im.getbbox()\n except:\n print \"you need to give me a i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fast replacement of the image's pixels in specific areas
def replace_fast(self, img, dst_clr):
    img[535:750, :290, :] = dst_clr   # h(y) w(x) c
    img[575:705, 900:, :] = dst_clr
    return img
[ "def replace(self, img, dst_clr):\n for i in range(80, 340): #x1 x2\n for j in range(500, 800): #y1 y2\n img[j][i] = dst_clr\n return img", "def _replace(img, old_color, new_color):\n img_data = img.load()\n for y in range(img.size[1]):\n for x in r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write images after the replacement.
def read_write_img(self):
    for file in os.listdir(self.path):
        filelist = input_path + file
        img = cv2.imread(filelist)
        dst_img = self.replace_fast(img, (0, 0, 0))
        # cv2.imwrite(out_path + file[:-4] + '.jpg', re_img)
        plt.subplot(121), plt.imshow(img), plt...
[ "def new_image(image):\n os.replace(image,PICTURES_IN + image)\n return", "def write_images(self):\n while self.cache:\n # pop the first and write it out\n fn, image = self.cache.pop(0)\n tifffile.imwrite(fn, image)", "def write_images():\n dataset = NTU_RGB_D(DA...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a mapping of elements to reference keys. A list is returned, with each element being a dictionary with entries 'refdata', containing data for (possibly) multiple references, and 'elements', a list of element Z numbers that those references apply to.
def compact_references(basis_dict, reffile_path):
    ref_data = io.read_references(reffile_path)

    element_ref_map = []

    # Create a dictionary of elements -> refdata
    for el, eldata in basis_dict['basisSetElements'].items():
        ref = sorted(eldata['elementReferences'])
        for x in element_ref_map...
[ "def extract_references(elem):\n wos_id = extract_wos_id(elem)\n references = elem.findall('./static_data/fullrecord_metadata/references/reference')\n ref_list = list()\n for reference in references:\n ref_dict = dict()\n for tag in ['uid', 'citedAuthor', 'year', 'page',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate isocurve from 2D data using marching squares algorithm.
def isocurve(data, level, connected=False, extend_to_edge=False):
    # This function is SLOW; plenty of room for optimization here.
    if extend_to_edge:
        d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
        d2[1:-1, 1:-1] = data
        d2[0, 1:-1] = data[0]
        d2[-1, 1:-1...
[ "def isocurve(data, level, connected=False, extendToEdge=False, path=False): \n \n if path is True:\n connected = True\n \n if extendToEdge:\n d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)\n d2[1:-1, 1:-1] = data\n d2[0, 1:-1] = data[0]\n d2[-1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a protorpc Message into a list suitable for PBLite.
def MessageToPBLiteList(msg):
    index_keys = dict([(f.number, f.name) for f in msg.all_fields()])
    if not index_keys:
        return []
    max_index = max(index_keys.keys())
    json_list = [None] * max_index
    for index, key in index_keys.iteritems():
        value = getattr(msg, key, None)
        if isinstance(value, messages.M...
[ "def split_eap_message(eap_messages: bytes) -> list:\n if len(eap_messages) < 253:\n return [eap_messages]\n _stop = len(eap_messages)\n _step = 253\n return [eap_messages[pos:pos+_step] for pos in range(0, _stop, _step)]", "def decode_all(cls, buf):\n msg_list = []\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple threadsafe memoization decorator. Uses a repr() of the params. This will be ok unless there is a custom __repr__ that obscures important information.
def _Memoize(func):
    l = threading.Lock()
    cache = {}
    def _Caller(*args, **kwargs):
        with l:
            params = repr((args, kwargs))
            try:
                return cache[params]
            except KeyError:
                result = func(*args, **kwargs)
                cache[params] = result
                return result
    return _Caller
[ "def memoize_mutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = pickle.dumps(args) + pickle.dumps(kwargs) #To use as hash for mutable objects.\n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:10...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load devshell credentials from the proxy. Also sets various attributes on the credential object expected by other parties.
def LoadDevshellCredentials(): try: return DevshellCredentials( user_agent=config.CLOUDSDK_USER_AGENT,) except Exception: # pylint:disable=broad-except, any problem means None return None
[ "def __LoadAuthCredentials(self):\n return super(DfpClient, self)._LoadAuthCredentials()", "def Load():\n if Check(): # exists and has valid refresh so load it\n credentials = json.loads(os.environ.get(Varname()))\n return credentials", "def patch_using_env(self):\n if self.cred_prop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add metric_id to the df if it isn't already in there
def define_metric(df, source): valid_sources = ['dalynator', 'codem', 'epi', 'como', 'dismod'] assert source in valid_sources, "Must pass one of %s" % valid_sources if 'metric_id' not in df.columns: met_map = pd.read_csv('%s/bin/get_pct_change_helpers/' 'source_metric_m...
[ "def add_integrity_metric(self, metric):\n if metric is None:\n return\n\n for m in self._integrity_metrics:\n if metric == m:\n # add to existing metric\n m.merge(metric)\n break\n else:\n self._integrity_metrics.add...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Init UDP connection; afterwards the UDP connection is available via remote_control.udp. REQUIRED BEFORE USING OTHER FUNCTIONS. Returns the UDP connection.
def init_udp_connection(): global udp udp = UDPConnection() return udp
[ "def init_UDP_connection(self):\n import socket\n\n self.sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock_recv.bind((RIO_IP, UDP_RECV_PORT))", "async def connect(self) -> None:\n udp_clie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get data from the recording PC. get_command: e.g. Command.GET_FZ or Command.GET_VERSION ...
def get_data(get_command): udp.send(get_command) d = udp.receive(1) try: return loads(d[len(Command.VALUE):]) except: return None
[ "def onGetCmds(self):\n strJsonResponse = \"\"\n self.dataLock.acquire()\n\n cmds = dict()\n cmds[\"Commands\"] = []\n for cmd in self.commandMap.values():\n desc = cmd.describe(False)\n cmdURL = \"/cmd?name={}\".format(desc[\"Command\"][\"Name\"])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Polling for multiple events, e.g. [Command.CHANGED_LEVEL, Command.CHANGED_LEVEL2]. Returns a tuple (event_type, event_type_data), or (None, None) if no matching event is pending.
def poll_multiple_events(event_type_list): rcv = udp.poll() if rcv is not None: for event_type in event_type_list: if bytes_startswith(rcv, event_type): x = loads(rcv[len(event_type):]) return (event_type, x) return (None, None)
[ "def events(self):\n if self.connection_active:\n if self.buff_out:\n event = select.POLLOUT\n else:\n event = select.POLLIN\n else:\n event = select.POLLOUT\n\n return event", "def get_result_events(self, event_type=None, respons...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the Authorization header is generated 61 seconds in the past, then a 401 is returned
def test_if_61_seconds_in_past_401_returned(api_client): past = timezone.now() - datetime.timedelta(seconds=61) with freeze_time(past): auth = auth_sender().request_header response = api_client.get( reverse('activity-stream'), content_type='', HTTP_AUTHORIZATION=auth, ...
[ "def test_if_61_seconds_in_past_401_returned(api_client):\n past = datetime.datetime.now() - datetime.timedelta(seconds=61)\n with freeze_time(past):\n auth = _auth_sender().request_header\n response = api_client.get(\n reverse('activity-stream:activity-stream'),\n content_type='',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the input matrix by adding column headers and padding the matrix with 0s to keep it a perfect square.
def _pad_matrix(self): for row in self.matrix: row.insert(0, 0) column_headers = [] for j in range(len(self.matrix[0])): if j == 0: # insert header node column_headers.append('H') else: # insert column headers ...
[ "def pad_matrix(M):\n m, n = len(M), len(M[0])\n b = 1\n while b < max(m, n):\n b <<= 1\n M += [[0] * n for _ in range(b - m)]\n for i in range(b):\n M[i] += [0] * (b - n)\n return M", "def pad_matrix(self, matrix, pad_value=0):\n max_columns = 0\n total_rows = len(matrix)\n for r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts all column headers and cells with 1s to Nodes
def _create_nodes(self): nodes = [] for i in range(len(self.matrix)): for j in range(len(self.matrix[i])): value = self.matrix[i][j] # Nothing to do if value == 0: continue node = None # co...
[ "def _initialize(self, matrix: List[List[int]], column_names: Optional[Iterable[AnyStr]] = None) -> None:\n if not matrix:\n return\n\n if column_names is None:\n num_columns = len(matrix[0])\n if num_columns <= 26:\n column_names = (chr(ord('A') + i) fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a link between nodes that are connected to the left, right, up and down. Additionally, each DancingNode is referenced to a ColumnNode
def _create_links_between_nodes(self, nodes): for node in nodes: node.left = self._get_left(node.row_id, node.column_id) node.right = self._get_right(node.row_id, node.column_id) # header node does not need up or down links if node.value != 'H': n...
[ "def __link_nodes(self):\n def __link_north(node):\n if node.x is 0:\n return\n\n pos = (node.x - 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the column header of the node at the given column index.
def _get_column_header(self, column): return self.matrix[0][column]
[ "def _get_header_column_letter(self, title):\n return self._to_letter(self._get_header_index(title))", "def get_header_cell(self):\n return self.heading.center(self.width)[:self.width]", "def _get_header_column_number(self, title):\n return self._get_header_index(title) + 1", "def _get_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform input .py file if provided, otherwise run simple_demo. Argument is assumed to be a syntactically valid Python module.
def main():
    if len(sys.argv) < 2:
        transform_module(EXAMPLE_BODY)
    else:
        module = sys.argv[1]
        # Read the source of the module file named by the argument.
        with open(module, 'r') as f:
            transform_module(f.read(), module)
[ "def runpy(self, name, contents):\n NAME = \"t_run\"\n f = open(\"%s.py\" % name, \"w\")\n f.write(contents)\n f.close()\n\n import importlib\n m = importlib.import_module(name)\n return m # the module instance", "def main():\n # type: (str) -> None\n set_tes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send invitation to phone number. Confirmation code is deterministic based on team info.
def post(user): data = request.get_json() try: number = parse(data["phone_number"], "US") except NumberParseException: message = "The number supplied does not seem to be valid. Please try again." print(message) return make_response(jsonify({"message": message}), 400) ...
[ "def send_invite(recipient, recipient_name, sender, sender_name, base_url, id):\n\n url = base_url.strip('/') + '/' + id\n invite_msg = \"\"\"\nDear {recp_name}:\n\n{sender_name} is inviting you to use Minion ({url}). Minion is a security testing framework \\\nbuilt by Mozilla to bridge the gap between develo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }