query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
Adds the occurrence count of words in the user-provided watch word list to the pandas DataFrame
def _add_watch_word_features_to_documents(self,text_df,doc_name_to_id_dict,watch_word_dict): for doc_name,row_id in doc_name_to_id_dict.iteritems(): if doc_name in watch_word_dict: watch_word_count=watch_word_dict[doc_name][0] logger.debug("Wor...
[ "def wordCount(wordListDF):\r\n wordsDF= wordListDF.groupBy('word').count()\r\n return wordsDF", "def word_count(db, word, dates):\n counts = []\n for date in dates:\n for _, count in data(db, \"word_ondate\", date, word):\n if count:\n counts.append(count)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts simple punctuation and capitalisation count-based features from documents
def _extract_token_features_from_text(self, corpus_list, doc_name_to_id_dict): ''' Go through the documents and extract simple punctuation and lexical features (capitalisation, count of punctuation) ''' doc_count=0 token_feature_dict=defaultdict(list) for doc_nam...
[ "def lexicon_features(tokens, feats): \n #TODO\n feats['pos_words']=0\n feats['neg_words']=0\n for n in tokens:\n i = n.lower()\n if(i in pos_words):\n feats['pos_words'] += 1\n elif(i in neg_words):\n feats['neg_words'] += 1\n else:\n pass", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts several simple sentiment features from a document. I count the number of positive and negative sentiment words in a document, the count of the longest run of positives/negatives, and the overall polarity of the document. These features are attempting
def _extract_sentiment_from_text(self, corpus_list, doc_name_to_id_dict): vader = SentimentIntensityAnalyzer() ''' Go through the documents and rate their sentiment ''' doc_count=0 sentiment_feature_dict=defaultdict(list) for doc_name, row_id in doc_name_to_id_dic...
[ "def otherFeatures(tweet):\r\n sentiment = sentiment_analyzer.polarity_scores(tweet)\r\n words = textclean(tweet) \r\n \r\n syllables = textstat.syllable_count(words)\r\n num_chars = sum(len(w) for w in words)\r\n num_chars_total = len(tweet)\r\n num_terms = len(tweet.split())\r\n num_words ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds satire/non-satire labels to the correct documents in the pandas DataFrame
def _add_labels_to_documents(self,text_df,doc_name_to_id_dict,labels_dict): logger.info("Adding labels to documents ...") for doc_name,row_id in doc_name_to_id_dict.iteritems(): if doc_name in labels_dict: label=labels_dict[doc_name] logger...
[ "def add_target_labels(self,train):\r\n df_labels = self.rl_kpis_sites_df[[\"datetime\", \"site_id\", \"mlid\"]]\r\n\r\n # Prepare columns for the following days. We will join data with these columns to find RLF\r\n prediction_interval = 5\r\n for i in range(prediction_interval):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalises sparse features so that their maximum is 1.0 while retaining sparsity.
def _normalise_sparse_features(self,text_features_df,scaler=None): text_features_without_labels=text_features_df.loc[:,text_features_df.columns != 'Label'].values if not scaler: scaler = preprocessing.MaxAbsScaler().fit(text_features_without_labels) text_features_without_labels...
[ "def feature_normalize(sparse_matrix):\n row_sum = np.array(sparse_matrix.sum(1))\n row_norm = np.power(row_sum, -1).flatten()\n row_norm[np.isinf(row_norm)] = 0.\n row_matrix_norm = sp.diags(row_norm)\n sparse_matrix = row_matrix_norm.dot(sparse_matrix)\n return sparse_matrix", "def scaleFeatures():\n num...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the F1 score on a test dataset using the best model found during cross-validation
def _test(self,testing_features_df,best_models_dict): best_model=best_models_dict['GaussianNB'] pred=best_model.predict(testing_features_df.loc[:,testing_features_df.columns != 'Label'].values) score=metrics.f1_score(testing_features_df['Label'].values,pred) logger.info("F1-score on the ...
[ "def f1_score(predictions, labels, toplabel=1):\n TP, FP, FN, TN = accu_table(predictions, labels, toplabel)\n precision = TP/(TP+FP+.01)\n recall = TP/(TP+FN+.01)\n return 2*precision*recall/(precision+recall+.01)*100", "def prediction(X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], \"d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the class weights based on the number of positive and negative exemplars in the training dataset. Can be useful for classifiers such as the SVM so as to balance the classes when learning the hyperplanes.
def get_class_weights(positive_count, negative_count): logger.info("Number of positive exemplars: " + str(positive_count)) logger.info("Number of negative exemplars: " + str(negative_count)) if positive_count>=negative_count: negative_weight=int(float(positive_count)/float(negative_...
[ "def _compute_weights(labels: Collection):\n class_support = np.unique(labels, return_counts=True)[1]\n class_frequencies = class_support / len(labels)\n # Class weights are the inverse of the class frequencies\n class_weights = 1 / class_frequencies\n # Normalize vector to sum up...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypting message using RSA in CBC Mode
def encrypt_RSA(message, pub): return rsa.encrypt(message, pub)
[ "def rsa_encrypt(msg, public_key):\n pass", "def rsa_encrypt(msg, e, n, k=3):\r\n msg = txt2num(msg, k)\r\n encrypt = (msg **e) % n\r\n return encrypt", "def encrypt(self, public_key, message):", "def encrypt(data: bytes, mode: EncryptionMode, rec_pubkey: RSA.RsaKey) -> bytes:\n\n session_key =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrypting message using RSA in CBC Mode
def decrypt_RSA(message, priv): return rsa.decrypt(message, priv)
[ "def rsa_decrypt(msg, private_key):\n pass", "def rsa_decrypt(ciphertext: bytes) -> dict:\n rsa_cipher = rsa_info['cipher']\n plaintext = rsa_cipher.decrypt(ciphertext)\n return plaintext", "def decrypt(self, private_key, cipher_text):", "def decrypt(message, private_key):\n return private_key....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if there is an existing filtered_location. We check for this by querying the database for an existing entry with the same timestamp. Note that we cannot check for the write_ts because in case the stream is filtered on the client, the write_ts for the filtered location may be slightly different.
def check_existing_filtered_location(timeseries, entry):
    existing_duplicate = timeseries.get_entry_at_ts("background/filtered_location", "data.ts", entry.ts)
    if existing_duplicate is not None:
        return True
    else:
        return False
[ "def has_location_changed(self):\n try:\n if self.get_twilight_times_by_day(-1) == (self.latitude, self.longitude):\n return False\n except Exception:\n return True", "def _location_match(self, loc):\n preprocessed_loc = {\"lat\": loc['lat'], \"long\": loc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block binary sensor.
async def test_block_binary_sensor( hass: HomeAssistant, mock_block_device, monkeypatch ) -> None: entity_id = f"{BINARY_SENSOR_DOMAIN}.test_name_channel_1_overpowering" await init_integration(hass, 1) assert hass.states.get(entity_id).state == STATE_OFF monkeypatch.setattr(mock_block_device.block...
[ "async def test_block_rest_binary_sensor(\n hass: HomeAssistant, freezer: FrozenDateTimeFactory, mock_block_device, monkeypatch\n) -> None:\n entity_id = register_entity(hass, BINARY_SENSOR_DOMAIN, \"test_name_cloud\", \"cloud\")\n monkeypatch.setitem(mock_block_device.status, \"cloud\", {\"connected\": Fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block binary sensor extra state attributes.
async def test_block_binary_sensor_extra_state_attr( hass: HomeAssistant, mock_block_device, monkeypatch ) -> None: entity_id = f"{BINARY_SENSOR_DOMAIN}.test_name_gas" await init_integration(hass, 1) state = hass.states.get(entity_id) assert state.state == STATE_ON assert state.attributes.get("...
[ "def device_state_attributes(self):", "async def test_sensor_state(hass: HomeAssistant) -> None:\n prior = 0.2\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"observations\": [\n {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block REST binary sensor.
async def test_block_rest_binary_sensor( hass: HomeAssistant, freezer: FrozenDateTimeFactory, mock_block_device, monkeypatch ) -> None: entity_id = register_entity(hass, BINARY_SENSOR_DOMAIN, "test_name_cloud", "cloud") monkeypatch.setitem(mock_block_device.status, "cloud", {"connected": False}) await i...
[ "async def test_block_rest_binary_sensor_connected_battery_devices(\n hass: HomeAssistant, freezer: FrozenDateTimeFactory, mock_block_device, monkeypatch\n) -> None:\n entity_id = register_entity(hass, BINARY_SENSOR_DOMAIN, \"test_name_cloud\", \"cloud\")\n monkeypatch.setitem(mock_block_device.status, \"c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block REST binary sensor for connected battery devices.
async def test_block_rest_binary_sensor_connected_battery_devices( hass: HomeAssistant, freezer: FrozenDateTimeFactory, mock_block_device, monkeypatch ) -> None: entity_id = register_entity(hass, BINARY_SENSOR_DOMAIN, "test_name_cloud", "cloud") monkeypatch.setitem(mock_block_device.status, "cloud", {"conne...
[ "async def test_block_rest_binary_sensor(\n hass: HomeAssistant, freezer: FrozenDateTimeFactory, mock_block_device, monkeypatch\n) -> None:\n entity_id = register_entity(hass, BINARY_SENSOR_DOMAIN, \"test_name_cloud\", \"cloud\")\n monkeypatch.setitem(mock_block_device.status, \"cloud\", {\"connected\": Fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block sleeping binary sensor.
async def test_block_sleeping_binary_sensor( hass: HomeAssistant, mock_block_device, monkeypatch ) -> None: entity_id = f"{BINARY_SENSOR_DOMAIN}.test_name_motion" await init_integration(hass, 1, sleep_period=1000) # Sensor should be created when device is online assert hass.states.get(entity_id) is...
[ "async def test_block_binary_sensor(\n hass: HomeAssistant, mock_block_device, monkeypatch\n) -> None:\n entity_id = f\"{BINARY_SENSOR_DOMAIN}.test_name_channel_1_overpowering\"\n await init_integration(hass, 1)\n\n assert hass.states.get(entity_id).state == STATE_OFF\n\n monkeypatch.setattr(mock_blo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block restored sleeping binary sensor.
async def test_block_restored_sleeping_binary_sensor( hass: HomeAssistant, mock_block_device, device_reg, monkeypatch ) -> None: entry = await init_integration(hass, 1, sleep_period=1000, skip_setup=True) register_device(device_reg, entry) entity_id = register_entity( hass, BINARY_SENSOR_DOMAIN,...
[ "async def test_block_restored_sleeping_binary_sensor_no_last_state(\n hass: HomeAssistant, mock_block_device, device_reg, monkeypatch\n) -> None:\n entry = await init_integration(hass, 1, sleep_period=1000, skip_setup=True)\n register_device(device_reg, entry)\n entity_id = register_entity(\n ha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test block restored sleeping binary sensor missing last state.
async def test_block_restored_sleeping_binary_sensor_no_last_state( hass: HomeAssistant, mock_block_device, device_reg, monkeypatch ) -> None: entry = await init_integration(hass, 1, sleep_period=1000, skip_setup=True) register_device(device_reg, entry) entity_id = register_entity( hass, BINARY_...
[ "async def test_block_restored_sleeping_binary_sensor(\n hass: HomeAssistant, mock_block_device, device_reg, monkeypatch\n) -> None:\n entry = await init_integration(hass, 1, sleep_period=1000, skip_setup=True)\n register_device(device_reg, entry)\n entity_id = register_entity(\n hass, BINARY_SEN...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test RPC binary sensor.
async def test_rpc_binary_sensor( hass: HomeAssistant, mock_rpc_device, monkeypatch ) -> None: entity_id = f"{BINARY_SENSOR_DOMAIN}.test_cover_0_overpowering" await init_integration(hass, 2) assert hass.states.get(entity_id).state == STATE_OFF mutate_rpc_device_status( monkeypatch, mock_rp...
[ "async def test_binary_sensors(spa, setup_entry, hass: HomeAssistant) -> None:\n\n entity_id = f\"binary_sensor.{spa.brand}_{spa.model}_online\"\n state = hass.states.get(entity_id)\n # disabled by default\n assert state is None\n\n entity_id = f\"binary_sensor.{spa.brand}_{spa.model}_error\"\n st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test RPC restored binary sensor.
async def test_rpc_restored_sleeping_binary_sensor( hass: HomeAssistant, mock_rpc_device, device_reg, monkeypatch ) -> None: entry = await init_integration(hass, 2, sleep_period=1000, skip_setup=True) register_device(device_reg, entry) entity_id = register_entity( hass, BINARY_SENSOR_DOMAIN, "te...
[ "async def test_rpc_restored_sleeping_binary_sensor_no_last_state(\n hass: HomeAssistant, mock_rpc_device, device_reg, monkeypatch\n) -> None:\n entry = await init_integration(hass, 2, sleep_period=1000, skip_setup=True)\n register_device(device_reg, entry)\n entity_id = register_entity(\n hass, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test RPC restored sleeping binary sensor missing last state.
async def test_rpc_restored_sleeping_binary_sensor_no_last_state( hass: HomeAssistant, mock_rpc_device, device_reg, monkeypatch ) -> None: entry = await init_integration(hass, 2, sleep_period=1000, skip_setup=True) register_device(device_reg, entry) entity_id = register_entity( hass, BINARY_SENS...
[ "async def test_rpc_restored_sleeping_binary_sensor(\n hass: HomeAssistant, mock_rpc_device, device_reg, monkeypatch\n) -> None:\n entry = await init_integration(hass, 2, sleep_period=1000, skip_setup=True)\n register_device(device_reg, entry)\n entity_id = register_entity(\n hass, BINARY_SENSOR_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a point-spread function (PSF) using the Richards and Wolf formula. See the psf.PSF function for a detailed description.
def create_psf(zshape, rshape, zdims, rdims, ex_wavelen, em_wavelen, pinhole_radius, num_aperture=1.0, refr_index=1.333, pinhole_shape='square', psf_type=(psf.ISOTROPIC | psf.CONFOCAL), magnification=20.0): args = dict(shape=(int(zshape), int(rshape)), dims=(zdims, rd...
[ "def psf(p):\n # FIXME: Expression needs checking!\n y = n_u * u * scipy.special.jv(0, 2.*np.pi*u*p)\n return 2.*np.pi * scipy.integrate.simps(y, u)", "def psf(self, zv=ZernikeVector(), wavelength=550.*u.nm, plot=True):\n # poppy wants the wavelength in meters\n try:\n w = wavele...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deconvolve an image using the Richardson Lucy iterative algorithm. Subsequently apply a Wiener filter for denoising and improving the signal further.
def deconvolve(data, psf_vol, iterations, threshold=0, wiener_size=(3, 3, 3), wiener_noise=500000): data[data < threshold] = 0 data = richardson_lucy(image=data, psf=psf_vol, iterations=iterations, clip=False) gc.collect() data[data < 0] = 0 if wiener_noise != 0: data = wiener(data, my...
[ "def richardson_lucy_deconv(img, kernel, tot_iter=30, init=0.5):\n # img, kernel must both sum to 1, and are both greater or equal to 0\n kernel = kernel / np.sum(kernel)\n img = img / np.sum(img)\n\n print(np.sum(kernel))\n print(np.sum(img))\n if np.ndim(init) > 0:\n g1 = init\n else:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure the helper/namespace emulator can find `OpDef`s and create their meta objects. Also, check the basics of `TFlowMetaOperator`.
def test_meta_helpers(): assert isinstance(mt.add, TFlowMetaOperator) assert mt.add.node_def is None assert mt.add.op_def.obj.name == "Add" assert mt.add.reify() is mt.add assert isinstance(mt.matmul, TFlowMetaOperator) assert mt.matmul.op_def.obj.name == "MatMul" assert isinstance(mt.Rand...
[ "def test_operator_create_operator(self):\n pass", "def init_ops(self, no_mpe=False):\n self._init_ops_basics()\n if not no_mpe:\n print(\"Initializing MPE Ops...\")\n mpe_state_gen = spn.MPEState(log=True, value_inference_type=spn.InferenceType.MPE)\n if self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure we can construct an `inspect.Signature` object for a protobuf OpDef when its corresponding function isn't present in `tf.raw_ops`.
def test_opdef_sig(): from tensorflow.core.framework import op_def_pb2 custom_opdef_tf = op_def_pb2.OpDef() custom_opdef_tf.name = "MyOpDef" arg1_tf = op_def_pb2.OpDef.ArgDef() arg1_tf.name = "arg1" arg1_tf.type_attr = "T" arg2_tf = op_def_pb2.OpDef.ArgDef() arg2_tf.name = "arg2" ...
[ "def trampoline_signature(fn):\n\n # TODO: operator overloads\n names = []\n\n if fn[\"const\"]:\n names.append(\"K\")\n refqual = fn[\"ref_qualifiers\"]\n if refqual:\n if refqual == \"&\":\n names.append(\"R\")\n if refqual == \"&&\":\n names.append(\"O\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure we can create a Const tensor by hand.
def test_meta_const():
    with tf.Graph().as_default():
        one_mt = mt.const(1, "int32", "Const")

    with tf.Graph().as_default():
        another_one_mt = mt(1)

    assert one_mt == another_one_mt
    assert isinstance(one_mt.reify(), tf.Tensor)
    assert one_mt.reify().op.type == "Const"
[ "def constant(const, axes=None, dtype=None):\n graph_label_type = \"<Const({})>\".format(const)\n val = AssignableTensorOp(axes=axes, constant=True, persistent=True, trainable=False,\n graph_label_type=graph_label_type, dtype=dtype)\n nptensor = np.asarray(const, dtype=val.dtype...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the URL of the last `attr` attribute in the mission at `url`
def get_last_attr(url, attr): return read_riddle(url).split(f'{attr}="')[-1].split('"')[0]
[ "def get_last_attr_url(url, attr):\n attr = get_last_attr(url, attr)\n return \"{}/{}\".format(url.rsplit(\"/\", 1)[0], attr)", "def get_last_href_url(url):\n return get_last_attr_url(url, \"href\")", "def get_last_src_url(url):\n return get_last_attr_url(url, \"src\")", "def get_url_attribute(ele...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the URL of the last `attr` attribute in the mission at `url`
def get_last_attr_url(url, attr):
    attr = get_last_attr(url, attr)
    return "{}/{}".format(url.rsplit("/", 1)[0], attr)
[ "def get_last_attr(url, attr):\n return read_riddle(url).split(f'{attr}=\"')[-1].split('\"')[0]", "def get_last_href_url(url):\n return get_last_attr_url(url, \"href\")", "def get_last_src_url(url):\n return get_last_attr_url(url, \"src\")", "def get_url_attribute(element, attr_name):\r\n value = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the URL of the last `src` attribute in the mission at `url`
def get_last_src_url(url): return get_last_attr_url(url, "src")
[ "def get_last_href_url(url):\n return get_last_attr_url(url, \"href\")", "def get_last_attr_url(url, attr):\n attr = get_last_attr(url, attr)\n return \"{}/{}\".format(url.rsplit(\"/\", 1)[0], attr)", "def extract_pic_url(self):\n dom = DOM(self.page_source)\n tag_list = dom('a.rg_l')\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the URL of the last `href` attribute in the mission at `url`
def get_last_href_url(url): return get_last_attr_url(url, "href")
[ "def get_last_attr_url(url, attr):\n attr = get_last_attr(url, attr)\n return \"{}/{}\".format(url.rsplit(\"/\", 1)[0], attr)", "def get_last_src_url(url):\n return get_last_attr_url(url, \"src\")", "def get_last_attr(url, attr):\n return read_riddle(url).split(f'{attr}=\"')[-1].split('\"')[0]", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the databases used to track feeds and entries seen.
def openDBs(feed_db_fn, entry_db_fn):
    feed_db = shelve.open(feed_db_fn)
    entry_db = shelve.open(entry_db_fn)
    return (feed_db, entry_db)
[ "def db():", "def db_imports():\n import_energy_data()", "def populate_database(self):\n self.dye_stocks.add_new_dye_stocks()\n self.detections.add_new_detections()\n self.profiles.add_new_profiles()", "def open_db_session(event):\n request = event.request\n request.db = sqlite3....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the databases used to track feeds and entries seen.
def closeDBs(feed_db, entry_db):
    feed_db.close()
    entry_db.close()
[ "def close_on_exit(db):\r\n db.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.database_handle.close_databases()", "def exit_db(self):\n self.db_conn.close()", "def close_database(self):\n self.conn.close()", "def close_one(self):\n\t\tif not DBDict.closelock.acquire...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of feeds, poll feeds which have not been polled in over an hour. Look out for conditional HTTP GET status codes before processing feed data. Check if we've seen each entry in a feed, collecting any entries that are new. Sort the entries, then return the list.
def getNewFeedEntries(feeds, feed_db, entry_db): entries = [] for uri in feeds: print "Polling %s" % uri try: # Get the notes rememebered for this feed. feed_data = feed_db.get(uri, {}) last_poll = feed_data.get('last_poll', None) etag = feed_...
[ "def update_feeds():\n feedurls = get_feedurls()\n for url in feedurls:\n query_args = { 'q': url, 'v':'1.0', 'num': '30' }\n qs = urllib.urlencode(query_args)\n loader = 'http://ajax.googleapis.com/ajax/services/feed/load'\n loadurl = '%s?%s' % (loader, qs)\n request = urll...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of entries and an output filename, use templates to compose an aggregate page from the feeds and write to the file.
def writeAggregatorPage(entries, out_fn, date_hdr_tmpl, feed_hdr_tmpl, entry_tmpl, page_tmpl): out, curr_day, curr_feed = [], None, None for e in entries: # If this entry's date is not the current running day, change the # current day and add a date header to the page output. ...
[ "def _write_entries_group_venue(bib_entries):\n\n folder = os.path.join(params['htmlfile_group'], 'Venue')\n params['author_group_Venue'] = folder\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n count_name, count_number = _get_count_name_number(bib_entries)\n\n venue_entries_dict = {}\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an IM connection, a destination name, and a list of entries, send off a series of IMs containing entries rendered via template.
def sendEntriesViaIM(conn, to_nick, entries, im_chunk, feed_hdr_tmpl, entry_tmpl, msg_tmpl): out, curr_feed, entry_cnt = [], None, 0 for entry in entries: # If there's a change in current feed, note it and append a # feed header onto the message. if entry.feed.title != curr_fee...
[ "def writeAggregatorPage(entries, out_fn, date_hdr_tmpl, feed_hdr_tmpl, \n entry_tmpl, page_tmpl):\n out, curr_day, curr_feed = [], None, None\n\n for e in entries:\n # If this entry's date is not the current running day, change the \n # current day and add a date header to the page outpu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an IM bot, a destination name, and a list of content, render the message template and send off the IM.
def sendIMwithTemplate(conn, to_nick, out, msg_tmpl):
    try:
        msg_text = msg_tmpl % "".join(out)
        conn.sendIM(to_nick, msg_text)
        time.sleep(4)
    except KeyboardInterrupt:
        raise
    except Exception, e:
        print "\tProblem sending IM: %s" % e
[ "def send_templated(self, backend, recipient, template_name, **kwargs):\n message_content = self.render_template(template_name, **kwargs)\n data = {\"backend\": backend.value,\n \"recipient\": recipient,\n \"message\": message_content}\n self.send_notification_requ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save a list of feeds.
def saveSubs(feeds_fn, feeds): open(feeds_fn, "w").write("\n".join(feeds))
[ "def save_feeds():\n with open(os.path.join(__location__, 'feeds.json'), 'w') as f:\n json.dump(feeds_dict, f, indent=4)", "def save_all(self):\n\t\tfor tweet in self.list_of_tweets:\n\t\t\tself.__save_tweet(tweet)\n\t\tself.list_of_tweets = []\n\n\t\tlog.info(\"Save all tweets\")\n\n\t\tfor user in sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempt to remove a URI from the given list of subscriptions. Throws a SubsNotSubscribed exception if the URI wasn't found in the subscriptions.
def unsubscribeFeed(feeds, uri):
    if uri not in feeds:
        raise SubsNotSubscribed(uri)
    feeds.remove(uri)
[ "def removeSubscription(subscriber):", "def remove_subscription(self, request):\n user = request.user\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n topic_query = Topic.objects.get(id=serializer.data['id'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of feeds and a URI at which to find feeds, try adding this feed to the list.
def subscribeFeed(feeds, uri): feeds_found = feedfinder.getFeeds(uri) if len(feeds_found) == 0: raise SubsNoFeedsFound(uri) elif len(feeds_found) > 1: raise SubsMultipleFeedsFound(uri, feeds_found) else: feed_uri = feeds_found[0] if feed_uri in feeds: rais...
[ "def update_feeds():\n feedurls = get_feedurls()\n for url in feedurls:\n query_args = { 'q': url, 'v':'1.0', 'num': '30' }\n qs = urllib.urlencode(query_args)\n loader = 'http://ajax.googleapis.com/ajax/services/feed/load'\n loadurl = '%s?%s' % (loader, qs)\n request = urll...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the wrapper with feed and entry data.
def __init__(self, data, entry): self.data = data self.feed = data.feed self.entry = entry # Try to work out some sensible primary date for the entry, fall # back to the feed's date, and use the current time as a last resort. if entry.has_key("modified_parsed")...
[ "def __init__(self, data, entryCallback):\n XMLStreamReaderBase.__init__(self, data)\n \n self.__entryCallback = entryCallback\n \n self.version = \"\"\n self.baseUrl = \"\"", "def init():\n global root, updater, apiData\n\n with open(\"DataStore/Configs/telegramAPI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send request to process
def send_to_process(self, request): self.process.q.put(request)
[ "def call(self, request):\n return self.wait(self.send(request))", "def send_request(self, method, url, data=None, headers=None, timeout=None, instance_type=None):\n if not instance_type:\n instance_type = self.get_default_instance_type()\n resp = self.starter.send_request(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Abstract method to start a receive loop to dispatch requests to Process
def start_recv_loop(self): raise NotImplementedError( "Abstract method that must be implemented by deriving class")
[ "def handle(self) -> None:\n while True:\n raw_command = self.request.recv(1024)\n if not raw_command:\n break\n result = dispatch(self.state, raw_command)\n self.request.send(result)", "def main_loop(self):\n \n while self.running:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Abstract method to stop the receive loop created by start_recv_loop
def stop_recv_loop(self): raise NotImplementedError( "Abstract method that must be implemented by deriving class")
[ "def stop(self):\n self._ioloop.stop()", "def stop(self):\n\t\tself._keepListening = False", "def start_recv_loop(self):\n raise NotImplementedError(\n \"Abstract method that must be implemented by deriving class\")", "async def _stop(self) -> None:\n self.logger.debug(\"Stoppi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a directory of tiff files, assumed to be uint16 images of the same size, sum them all and return the average as float32
def getaverage(dir): print(dir) for (dirpath,dirname,filenames) in walk(dir): count = 0 for f in filenames: if f.endswith(".tif"): count = count+1 with open(dir+'/'+f,'rb') as fptr: im = Image.open(fptr) imarray ...
[ "def compute_average(imlist):\n\t# open first image and make into an array of type float\n\taverageim = array(Image.open(imlist[0]),'f')\n\tfor imname in imlist[1]:\n\t\ttry:\n\t\t\taverageim += array(Image.open(imname))\n\t\texcept:\n\t\t\tprint imname + '...skipped'\n\taverageim /= len(imlist)\n\t#return average ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inclusion tag listing the most recently viewed products
def recently_viewed_products(context, current_product=None):
    request = context['request']
    products = history.get(request)
    if current_product:
        products = [p for p in products if p != current_product]
    return {'products': products, 'request': request}
[ "def recently_viewed_products(context):\n request = context['request']\n product_ids = history_helpers.get_recently_viewed_product_ids(request)\n product_dict = product_models.Product.browsable.in_bulk(product_ids)\n \n # Reordering as the id order gets messed up in the query\n product_ids.reverse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddPolicySelf provides a method for dispatcher to add authorization rules to the current policy. The function returns the rules affected and error.
def add_policy_self(self, should_persist, sec, ptype, rules): no_exists_policy = [] for rule in rules: if not self.get_model().has_policy(sec, ptype, rule): no_exists_policy.append(rule) if should_persist: try: if isinstance(self.adapter,...
[ "def add_rule(self, *args):\n return _wali.EWPDS_add_rule(self, *args)", "def PolicyEnforcement(self) -> PolicyEnforcement:", "def export_enforcePolicies( self, granularity, name, useNewRes = True ):\n\n gLogger.info( \"ResourceManagementHandler.enforcePolicies: Attempting to enforce policies for %s %...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove_policy_self provides a method for dispatcher to remove policies from current policy. The function returns the rules affected and error.
def remove_policy_self(self, should_persist, sec, ptype, rules): if should_persist: try: if isinstance(self.adapter, batch_adapter): self.adapter.remove_policy(sec, ptype, rules) except Exception as e: self._e.logger.error("An exception...
[ "def remove_policy(self, policy_name):\n\t\ttry: \n\t\t\tdel self.policies[policy_name]\n\t\texcept KeyError:\n\t\t\tpass", "def remove_filtered_policy_self(self, should_persist, sec, ptype, field_index, *field_values):\n if should_persist:\n try:\n self.adapter.remove_filtered_po...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove_filtered_policy_self provides a method for dispatcher to remove an authorization rule from the current policy; field filters can be specified. The function returns the rules affected and error.
def remove_filtered_policy_self(self, should_persist, sec, ptype, field_index, *field_values): if should_persist: try: self.adapter.remove_filtered_policy(sec, ptype, field_index, field_values) except Exception as e: self._e.logger.error("An exception occu...
[ "def remove_policy_self(self, should_persist, sec, ptype, rules):\n if should_persist:\n try:\n if isinstance(self.adapter, batch_adapter):\n self.adapter.remove_policy(sec, ptype, rules)\n except Exception as e:\n self._e.logger.error(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
clear_policy_self provides a method for dispatcher to clear all rules from the current policy.
def clear_policy_self(self, should_persist):
    if should_persist:
        try:
            self.adapter.save_policy(None)
        except Exception as e:
            self._e.logger.error("An exception occurred: " + str(e))
    self.get_model().clear_policy()
[ "def clear_others_rules(self):\n\t\tself.others_rules.clear()", "def reset_policy(self, policy=None):\n if policy is None:\n self._reset_read_policy()\n self._reset_write_policy()\n elif policy == 'read':\n self._reset_read_policy()\n elif policy == 'write':\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a list of tags and a key to find and returns the value or None if not found
def try_find_tag(self, tags_list, tag_key):
    if tags_list is None or tags_list.keys() is None:
        return None
    return next((tags_list[key] for key in tags_list.keys() if key == tag_key), None)
[ "def tag_value(data, key):\n prefix = '{0}:'.format(key)\n for tag in data.tags:\n if tag.startswith(prefix):\n return tag[len(prefix):]\n raise KeyError()", "def get_tag(tags: Iterable[Dict[str, str]], key: str) -> str:\n if not tags:\n return ''\n name_tag = filter(lambda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get not only the first but all indexes of a value in a list or tuple.
def get_list_index(lst: Union[list, tuple], value): return [i for i, v in enumerate(lst) if v == value]
[ "def get_index(item: Optional[List], value: int):\n if item is None:\n return None\n else:\n try:\n return item.index(value)\n except ValueError:\n return None", "def none_if_tuple_out_of_idx(t, index):\n return t[index] if len(t) > index else None", "def firs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Slice a list by step.
def list_step_slice(lst: list, step: int = 1):
    assert isinstance(lst, (list, tuple))
    for i in range(0, len(lst), step):
        yield lst[i:i + step]
[ "def build_list_with_step(length, step):\n lst = []\n i = 0\n while len(lst) < length:\n if i % step == 0:\n lst.append(i)\n i += 1\n return lst", "def chunks(lst, amount):\n return [lst[i:i + amount] for i in range(0, len(lst), amount)]", "def get_slice(seq,start=0,stop=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an object's MD5. If the object is not supported by `json.dumps`, please provide a trans_func.
def get_md5(obj, trans_func=None):
    if trans_func is None:
        trans_func = json.dumps
    obj_str = trans_func(obj)
    hl = hashlib.md5()
    hl.update(obj_str.encode(encoding='utf-8'))
    return hl.hexdigest()
[ "def json_encode_hash(obj: Union[List[Any], Dict[str, Any]]) -> str:\n obj_hash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(obj, sort_keys=True).encode()\n obj_hash.update(encoded)\n return obj_hash.hexdigest()", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a mock series of engine calls. These are expected to match engine-related calls established by the test subject.
def _assert_engines(self): writer_conn = SingletonConnection() writer_engine = SingletonEngine(writer_conn) if self.slave_uri: async_reader_conn = SingletonConnection() async_reader_engine = SingletonEngine(async_reader_conn) else: async_reader_conn =...
[ "def test_main_routine_calls_Manager_methods_variant1(self):\n\n module.Manager = Mock()\n\n module.Manager.a = Mock()\n module.Manager.b = Mock()\n module.Manager.c = Mock()\n\n main_routine()\n\n expected_calls = [call(), call().a(), call().b(), call().c(\"c_args\")]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the instrumentation applied to a context class is independent of a specific _TransactionContextManager / _TransactionFactory.
def test_multiple_factories(self): mgr1 = enginefacade.transaction_context() mgr1.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) mgr2 = enginefacade.transaction_context() mgr2.configure( connection=self.engine_uri, ...
[ "def test_multiple_factories_nested(self):\n mgr1 = enginefacade.transaction_context()\n mgr1.configure(\n connection=self.engine_uri,\n slave_connection=self.slave_uri\n )\n mgr2 = enginefacade.transaction_context()\n mgr2.configure(\n connection=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the instrumentation applied to a context class supports nested calls among multiple _TransactionContextManager objects.
def test_multiple_factories_nested(self): mgr1 = enginefacade.transaction_context() mgr1.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) mgr2 = enginefacade.transaction_context() mgr2.configure( connection=self.engine_u...
[ "def test_request_context_and_transaction(self, *args):\n with self.app.test_request_context(*args) as ctx:\n transaction = current_session.begin_nested()\n self.app.manager = self.circulation_manager_setup(\n current_session\n )\n yield ctx\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test that everything in CONF.database.iteritems() is accepted. There's a handful of options in oslo.db.options that seem to have no meaning, but need to be accepted. In particular, Cinder and maybe others are doing exactly this call.
def test_all_options(self):
    factory = enginefacade._TransactionFactory()
    cfg.CONF.register_opts(options.database_opts, 'database')
    factory.configure(**dict(cfg.CONF.database.items()))
[ "def _check_db_cridentials(self):\n OK = True\n if(self.__test_profile.has_section('DATABASE')):\n if(not self.__test_profile.has_option('DATABASE', 'username')):\n self.__logger.error(\"The test profile requires a 'username' entry in the [DATABASE] section.\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dynamically load a plugin associated to a given hook. The plugin module should be located at 'dci_agent.plugins.plugin_', within the plugin module a class should be found with the name corresponding to the hook's name with the first character capitalized.
def load_plugin(hook): try: module_path = 'dci_agent.plugins.plugin_%s' % hook loaded_module = importlib.import_module(module_path) class_name = hook.capitalize() return getattr(loaded_module, hook.capitalize()) except ImportError: print("hook '%s' does not exist." % hoo...
[ "def load_plugin(self, plugin_name):\n modulename, classname = plugin_name.split(\".\")\n module = __import__(\"abbott.plugins.\"+modulename, fromlist=[classname])\n \n pluginclass = getattr(module, classname)\n \n plugin = pluginclass(plugin_name, self._transport, self)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
run the load_notes method with no existing file
def test_load_notes_file_not_found(tmp_path): # monkeypatch.setattr(dml.pathlib.Path, 'exists', lambda *x: False) apofile = tmp_path / 'apropos.apo' opts, apodata = dml.load_notes(apofile) assert opts == {"AskBeforeHide": True, "ActiveTab": 0, 'language': 'eng', 'NotifyOnSave': True,...
[ "def test_load_notes_not_a_pickle(tmp_path):\n apofile = tmp_path / 'apropos.apo'\n with apofile.open(mode='w') as f:\n f.write(\"oihgyavjjvjdvj diidn dnni\")\n f.write(\"\\n\")\n opts, apodata = dml.load_notes(apofile)", "def __init__(self, notes='No notes provided', raw_file_name=None, *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
run the load_notes method with a non-pickle file
def test_load_notes_not_a_pickle(tmp_path):
    apofile = tmp_path / 'apropos.apo'
    with apofile.open(mode='w') as f:
        f.write("oihgyavjjvjdvj diidn dnni")
        f.write("\n")
    opts, apodata = dml.load_notes(apofile)
[ "def test_load_notes_file_not_found(tmp_path):\n # monkeypatch.setattr(dml.pathlib.Path, 'exists', lambda *x: False)\n apofile = tmp_path / 'apropos.apo'\n opts, apodata = dml.load_notes(apofile)\n assert opts == {\"AskBeforeHide\": True, \"ActiveTab\": 0, 'language': 'eng',\n 'Notify...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the first index at which the given value is found. >>> ll = LinkedList() >>> ll.index(1) 1 >>> ll.push(ListNode(2)) >>> ll.push(ListNode(3)) >>> ll.index(3) 1 >>> ll.push(ListNode(3)) >>> ll.index(3) 1
def index(self, value: int) -> int:
    current = self.head
    index = 0
    while current:
        if current.value == value:
            return index
        current = current.next
        index += 1
    return -1
[ "def linear_search(value, num_list, index):\n if index == len(num_list):\n return -1\n if value == num_list[index]:\n return index\n return linear_search(value, num_list, index + 1)", "def first_different_index(lst, val):\n for index, number in enumerate(lst):\n if number != val: \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of times the given value appears. >>> ll = LinkedList() >>> ll.count(3) 0 >>> ll.push(ListNode(2)) >>> ll.push(ListNode(3)) >>> ll.count(3) 1 >>> ll.push(ListNode(3)) >>> ll.count(3) 2
def count(self, value: int) -> int:
    count = 0
    current = self.head
    while current:
        if current.value == value:
            count += 1
        current = current.next
    return count
[ "def count(self, value: object) -> int:\n count = 0\n current = self.head\n while current.next != self.tail:\n current = current.next\n if current.value == value:\n count += 1\n return count", "def count(self, value: object) -> int:\n\n # Ini...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test homodyne measurement of a squeeze state is correct, returning a variance of np.exp(-2*r)*hbar/2, via the frontend
def test_squeeze_variance_frontend(self, setup_eng, hbar, tol): eng, prog = setup_eng(1) with prog.context as q: ops.Sgate(R) | q ops.MeasureX | q res = np.empty(0) for i in range(N_MEAS): eng.run(prog) res = np.append(res, q[0].val) ...
[ "def test_squeeze_variance(self, setup_backend, hbar, pure, monkeypatch, tol):\n # TODO: this test is a backend test that duplicates\n # the existing `test_squeeze_variance` integration test.\n # It should live in the backend folder, but currently takes too\n # long to run both.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test x displacement on the Gaussian backend gives correct displacement
def test_x_displacement(self, setup_eng, hbar, tol):
    eng, prog = setup_eng(1)

    with prog.context as q:
        ops.Xgate(X) | q

    state = eng.run(prog).state
    mu_x = state.means()[0]

    assert state.hbar == hbar
    assert np.allclose(mu_x, X, atol=tol, rtol=0)
[ "def gaussian(self, mu, sigma, x):\n return np.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / np.sqrt(2.0 * np.pi * (sigma ** 2))", "def __reward_Gaussian(self, x):\n return np.exp(-x*x/2.0)", "def gaussian(mu, sigma, x):\n return np.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / np.sqrt(2.0 * np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test homodyne measurement of a squeeze state is correct, returning a variance of np.exp(-2*r)*hbar/2
def test_squeeze_variance(self, setup_backend, hbar, pure, monkeypatch, tol): # TODO: this test is a backend test that duplicates # the existing `test_squeeze_variance` integration test. # It should live in the backend folder, but currently takes too # long to run both. # We shou...
[ "def test_squeeze_variance_frontend(self, setup_eng, hbar, tol):\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Sgate(R) | q\n ops.MeasureX | q\n\n res = np.empty(0)\n\n for i in range(N_MEAS):\n eng.run(prog)\n res = np.append(re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Importer for the PTS file format. Assumes version 1 of the format.
def pts_importer(filepath, image_origin=True, z=False, **kwargs): with open(filepath, 'r') as f: lines = [l.strip() for l in f.readlines()] line = lines[0] while not line.startswith('{'): line = lines.pop(0) if not z: xs = [] ys = [] for line in lines: ...
[ "def readPts(ptsStr):\n\tfids = []\n\tpts = []\n\tlast = None\n\tfor line in ptsStr.split(\"\\n\"):\n\t\titems = line.split(\"\\t\")\n\t\tsample, type, index, mat, subindex, x, y = items\n\t\tif sample == \"Sample\":\n\t\t\tcontinue\n\t\tptList = None\n\t\tif type == \"S\":\n\t\t\tif (last == None) or (last[0] != m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a file handle to write in to (which should act like a Python `file` object), write out the landmark data. No value is returned. Writes out the LJSON format which is a verbose format that closely resembles the labelled point graph format. It describes semantic labels and connectivity between labels. The first axis...
def ljson_exporter(lmk_points, filepath, **kwargs): lmk_points[np.isnan(lmk_points)] = None lmk_points = [list(_tmp) for _tmp in lmk_points] ljson = { 'version': 2, 'labels': [], 'landmarks': { 'points': lmk_points } } with open(filepath, "w") as file_...
[ "def LJSONExporter(landmark_group, file_handle, **kwargs):\n lg_json = landmark_group.tojson()\n # Add version string\n lg_json['version'] = 2\n\n # Convert nan values to None so that json correctly maps them to 'null'\n points = lg_json['landmarks']['points']\n # Flatten list\n try:\n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a file handle to write in to (which should act like a Python `file` object), write out the landmark data. No value is returned. Writes out the PTS format which is a very simple format that does not contain any semantic labels. We assume that the PTS format has been created using Matlab and so use 1based indexing ...
def pts_exporter(pts, file_handle, **kwargs): # Swap the x and y axis and add 1 to undo our processing # We are assuming (as on import) that the landmark file was created using # Matlab which is 1 based if len(pts.shape) == 2: pts = pts[:, [1, 0]] + 1 else: pts = pts[:, [2, 1, 0]] +...
[ "def PTSExporter(landmark_group, file_handle, **kwargs):\n pts = landmark_group.lms.points\n # Swap the x and y axis and add 1 to undo our processing\n # We are assuming (as on import) that the landmark file was created using\n # Matlab which is 1 based\n pts = pts[:, [1, 0]] + 1\n\n header = 'ver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read .flo file in Middlebury format
def readFlow(fn): # Code adapted from: # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy # WARNING: this will work on little-endian architectures (eg Intel x86) only! with open(fn, 'rb') as f: magic = np.fromfile(f, np.float32, count=1) ...
[ "def read(f):\n\tp = HMMParser()\n\treturn p.read(f)", "def _read_molly_head(mf):\n\n # If 'fbytes' in the next line comes up blank, we have reached the end of\n # the file\n fbytes = mf.read(4)\n if fbytes == '': return None\n\n # If it does not start with 44 in either big or little endian form,\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the list of the references to the bound services
def get_bindings(self):
    with self._lock:
        return list(self.services.keys())
[ "def services(self):\n return self.__services", "def getRefs(self, **kwargs):\n return []", "def getServiceNames(self):\n self.send_getServiceNames()\n return self.recv_getServiceNames()", "def list_bindings(self):\n endpoint = self.build_url(\"/bindings\")\n return self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called by the framework when a service event occurs
def service_changed(self, event): if ( self._ipopo_instance is None or not self._ipopo_instance.check_event(event) ): # stop() and clean() may have been called after we have been put # inside a listener list copy... # or we've been told to igno...
[ "def serviceStarted(self):", "def __onEventReadyHandler(self):\n self.__service_signal.set()", "def serviceStopped(self):", "def process_events(self):\n pass", "def _service_task(self):\r\n pass", "def on_start(self, event):\n pass", "def start_services(self):", "def servic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores the given service in the dictionary
def __store_service(self, key, service): self._future_value.setdefault(key, []).append(service)
[ "def push_service_into_list(self, name, service):\n self.services[name] = service\n if service.required:\n self.services['required'].append(name)\n else:\n self.services['optional'].append(name)\n self.logger.debug('added %s to the service list', name)\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes the given service from the future dictionary
def __remove_service(self, key, service): try: # Remove the injected service prop_services = self._future_value[key] prop_services.remove(service) # Clean up if not prop_services: del self._future_value[key] except KeyError: ...
[ "def remove_service(self, url):\n self.services = [post for post in self.services if post[2] != url]\n with open(os.path.join(self.working_directory, self.file_name), \"w\") as file:\n json.dump(self.services, file)\n return \"Ok\"", "def remove_service(self, service_id):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert MMDetection checkpoint to Detectron2 style.
def convert(src: str, dst: str) -> None: "mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]" assert src.endswith('pkl') or src.endswith('pth'), 'the source Detectron2 checkpoint should endswith `pkl` or `pth`.' mm_model = torch.load(src) det2_model = pickle.load(open(dst, 'rb')) det2_state...
[ "def unpack_checkpoint(self, checkpoint, model, criterion, optimizer, scheduler) -> None:\n pass", "def convert(tool='copilot'):", "def restore_model_from_checkpoint(self, ckptpath, session):\n # Restore variables from disk.\n self.log_file.write('[*] Loading checkpoints from %s\\n' % ckptp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return coverage data for the named module as absolute addresses. If no name is given / available via self.modules, the trace is assumed to be an ABSOLUTE ADDRESS TRACE. These are arguably the least flexible kind of traces available, but are still provided as an option. This function should return a list of integers re...
def get_addresses(self, module_name=None): raise NotImplementedError("Absolute addresses not supported by this log format")
[ "def get_offsets(self, module_name):\n raise NotImplementedError(\"Relative addresses not supported by this log format\")", "def get_ranges_covered_by_stubs(self, dump_dir):\n output_dfnames = self.oflister.get_reg_files_for_filepart_possible(\n self.oflister.makeargs(dump_dir, self.list_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return coverage data for the named module as relative offets. This function should return a list of integers representing the relative offset of an executed instruction OR basic block from the base of the requested module (module_name). It is okay to return an instruction trace, OR a basic block trace from thin functio...
def get_offsets(self, module_name): raise NotImplementedError("Relative addresses not supported by this log format")
[ "def get_offset_blocks(self, module_name):\n raise NotImplementedError(\"Block form not supported by this log format\")", "def get_coverage(self):\n\n # returns whether the node is a constant (e.g. a docstring or `...`)\n def is_constant(node):\n if not isinstance(node, ast.Expr):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return coverage data for the named module in block form. This function should return a list of tuples representing the coverage for the requested module (module_name). The tuples must be in the form of (offset, size). offset a relative offset from the module_name base address size the size of the instruction, block, or...
def get_offset_blocks(self, module_name): raise NotImplementedError("Block form not supported by this log format")
[ "def get_coverage(self):\n\n # returns whether the node is a constant (e.g. a docstring or `...`)\n def is_constant(node):\n if not isinstance(node, ast.Expr):\n return False\n return isinstance(node.value, ast.Constant)\n\n # iterate over all nodes\n coverage = []\n for node_info in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the total rotor power related to the thrust generation based on P_parasiticbody, P_profilerotor, P_inducedrotor, and P_climb
def rotor_hover_power(Weight, gamma, num_rotor, f_body, AtmData, Propeller, Cd_bar=None): # return P_aero, power_distr # General K_induced = 1.15 # typicaly 1.13 - 1.15, pick largest for safety c_bar = Propeller.c_bar if c_bar is None: raise TypeError("Please remember to set c_bar into Prop...
[ "def CruiseThrustPower(self):\n return ( self.CruiseThrust * self.Aircraft['Cruise Speed'] ).to(ureg['MW'])", "def calc_pwr( BW_used_rd_1, BW_used_wr_1, p_empty_1, p_miss_1, p_hit_1, t_ppd_1, t_sr_1, P_tot_1, mem_conf_names, BW_used_rd_2, BW_used_wr_2 ):\n\n mem_conf_lookup = {'ddr3-800' : '800',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of distinct elements named 'subCollectionName' per year.
def getSubCollectionSizePerYear(self, subCollectionName, venue, yearMin, yearMax): venueRegx = re.compile("^{}$".format(venue), re.IGNORECASE) return self.db.papers.aggregate([{ "$match": {"$and": [ {"venue": venueRegx}, {"year": {"$gte": yearMin, "$lte": yea...
[ "def getSubCollectionSizePerVenues(self, subCollectionName, venues, year):\n venuesRgx = [re.compile(\"^{}$\".format(v), re.IGNORECASE) for v in venues]\n return self.db.papers.aggregate([{\n \"$match\": {\"$and\": [\n {\"venue\": {\"$in\": venuesRgx}},\n {\"ye...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of distinct elements named 'subCollectionName' per venue for a specific year.
def getSubCollectionSizePerVenues(self, subCollectionName, venues, year): venuesRgx = [re.compile("^{}$".format(v), re.IGNORECASE) for v in venues] return self.db.papers.aggregate([{ "$match": {"$and": [ {"venue": {"$in": venuesRgx}}, {"year": year}, ...
[ "def getSubCollectionSizePerYear(self, subCollectionName, venue, yearMin, yearMax):\n\n venueRegx = re.compile(\"^{}$\".format(venue), re.IGNORECASE)\n return self.db.papers.aggregate([{\n \"$match\": {\"$and\": [\n {\"venue\": venueRegx},\n {\"year\": {\"$gte\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the top N items of type 'elementType' subject to the filters provided.
def getTopNElements(self, n, elementType, filterKeys, filterValues): aggregation_pipeline = [] # First expand sub-collections we want to filter or search. if elementType == "authors" or "author" in filterKeys: aggregation_pipeline.append({ "$unwind": "$authors", ...
[ "def get_top_five():\n\n # this is simply a placeholder until I create the logic to query top movies based on num reviews and star ratings...\n t1 = Movie.objects.get(name__icontains='out of the past')\n t2 = Movie.objects.get(name__icontains='double indem')\n t3 = Movie.objects.get(name__icontains='big...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of publications per year for a specified venue.
def getAmountPublicationPerYear(self, venue): year_publication = {} for j in self.json_file: if j['venue'].upper() == venue.upper(): if j['year'] != "": if (j['year'] in year_publication): year_publication[j['year']] += 1 ...
[ "def cve_count_by_year(year):\n count = CVE.query.filter(\n year == db.extract(\"year\", CVE.published_date)\n ).count()\n\n return {\n \"cve_count\": count\n }", "def getSubCollectionSizePerVenues(self, subCollectionName, venues, year):\n venuesRgx = [re.compile(\"^{}$\".format(v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of times the author was cited between yearMin and yearMax.
def getNumberOfTimeCitedPerYear(self, yearMin, yearMax, author): dict_citation = {} for i in range(yearMin, yearMax + 1): dict_citation[i] = 0 authorID = self.getAuthorID(author, self.json_file) print(authorID) if (authorID != ""): for j in self.json_fil...
[ "def get_pub_years(self, min_year=3000, max_year=0):\n years_counter = (int(ct.first_publication_date[0][0:4]) for ct in self.get_ctrees())\n years = Counter()\n for year in years_counter:\n if year < min_year:\n min_year = year\n if year > max_year:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the nome_impresso of this AdicionalPersist. {{{adicional_persist_nome_impresso_value}}}
def nome_impresso(self): return self._nome_impresso
[ "def get_nome(self):\n\n nome = \"{0} + {1}\".format(self.coquetel.get_nome(), self.nome)\n\n return nome", "def get_nombre_apellidos(self):\n return self.__nombre + \" \" + self.__apellidos", "def nom(self):\n return self._nom", "def sito_istituzionale(self) -> str:\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the nome_impresso of this AdicionalPersist. {{{adicional_persist_nome_impresso_value}}}
def nome_impresso(self, nome_impresso): self._nome_impresso = nome_impresso
[ "def inativo(self, inativo):\n self._inativo = inativo", "def nom(self, nom):\n\n self._nom = nom", "def nome_logradouro(self, nome_logradouro):\n self._nome_logradouro = nome_logradouro", "def imprime_persona(self):\n print(\n \"Nombre: {} \\nEdad: {}\\nFecha de nacimie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the numero_receita_federal of this AdicionalPersist. {{{adicional_persist_numero_receita_federal_value}}}
def numero_receita_federal(self): return self._numero_receita_federal
[ "def faixa_nosso_numero(self):\n return self._faixa_nosso_numero", "def recurring_fee(self):\n return self._recurring_fee", "def fixed_fee_frequency(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"fixed_fee_frequency\")", "def fixed_fee_frequency(self) -> Optional[pulumi.Input[in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the numero_receita_federal of this AdicionalPersist. {{{adicional_persist_numero_receita_federal_value}}}
def numero_receita_federal(self, numero_receita_federal): self._numero_receita_federal = numero_receita_federal
[ "def federal_tax_id_type(self, federal_tax_id_type):\n\n self._federal_tax_id_type = federal_tax_id_type", "def faixa_nosso_numero(self, faixa_nosso_numero):\n self._faixa_nosso_numero = faixa_nosso_numero", "def set_fumi_number(self, number):\n self._cp['fumi_number'] = number", "def ren...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the data_nascimento of this AdicionalPersist. {{{adicional_persist_data_nascimento_value}}}
def data_nascimento(self): return self._data_nascimento
[ "def getDataInregistrare(self):\n return self.dataInregistrare", "def nodata_value(self):\n return self.__nodata_value", "def faixa_nosso_numero(self):\n return self._faixa_nosso_numero", "def __get_diccionario_ataques(self):\r\n return self.__diccionario_ataques", "def getCantid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the data_nascimento of this AdicionalPersist. {{{adicional_persist_data_nascimento_value}}}
def data_nascimento(self, data_nascimento): self._data_nascimento = data_nascimento
[ "def setDataInregistrare(self, data):\n self.dataInregistrare = data", "def setCantidadMonodroga(self, cantidad_monodroga):\r\n self.cantidad_monodroga = cantidad_monodroga", "def datafangstdato(self, datafangstdato):\n\n self._datafangstdato = datafangstdato", "def data_accreditamento(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the sexo of this AdicionalPersist. {{{adicional_persist_sexo_value}}}
def sexo(self): return self._sexo
[ "def get_salario(self):\n\n return self.salario", "def get_soma(self):\n soma = None\n somaList = self.get_node_by_types([SOMA])\n if somaList:\n soma = somaList[0]\n return soma", "def __comprobarSexo(self):\n\t\tif self.__sexo != 'H' and self.sexo != 'M':\n\t\t\ts...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the sexo of this AdicionalPersist. {{{adicional_persist_sexo_value}}}
def sexo(self, sexo): self._sexo = sexo
[ "def seco_1(self, seco_1):\n\n\n self._seco_1 = seco_1", "def SO(self, value):\n self.__South = value", "def sales_tax(self, sales_tax):\n\n self._sales_tax = sales_tax", "def set_spo_to_save(self, spo_to_save):\n if isinstance(spo_to_save, int):\n self.spo_to_save = spo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the numero_identidade of this AdicionalPersist. {{{adicional_persist_numero_identidade_value}}}
def numero_identidade(self): return self._numero_identidade
[ "def get_numero(self):\r\n return self.__numero", "def faixa_nosso_numero(self):\n return self._faixa_nosso_numero", "def ultimo_nosso_numero(self):\n return self._ultimo_nosso_numero", "def max_nosso_numero(self):\n return self._max_nosso_numero", "def pid(self):\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the numero_identidade of this AdicionalPersist. {{{adicional_persist_numero_identidade_value}}}
def numero_identidade(self, numero_identidade): self._numero_identidade = numero_identidade
[ "def faixa_nosso_numero(self, faixa_nosso_numero):\n self._faixa_nosso_numero = faixa_nosso_numero", "def ultimo_nosso_numero(self, ultimo_nosso_numero):\n self._ultimo_nosso_numero = ultimo_nosso_numero", "def setCantidadMonodroga(self, cantidad_monodroga):\r\n self.cantidad_monodroga = ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the orgao_expedidor_identidade of this AdicionalPersist. {{{adicional_persist_orgao_expedidor_identidade_value}}}
def orgao_expedidor_identidade(self): return self._orgao_expedidor_identidade
[ "def getEdifici(self):\n lang = self.pref_lang()\n edifici = self._dadesUnitat['edifici_' + lang]\n return edifici", "def orcid_identity(self):\n from api.models.inspirehep import OrcidIdentity\n try:\n return OrcidIdentity.objects.get(orcid_value=self['value'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the orgao_expedidor_identidade of this AdicionalPersist. {{{adicional_persist_orgao_expedidor_identidade_value}}}
def orgao_expedidor_identidade(self, orgao_expedidor_identidade): self._orgao_expedidor_identidade = orgao_expedidor_identidade
[ "def ex_ord_id(self, ex_ord_id):\n\n self._ex_ord_id = ex_ord_id", "def onchange_emp_id(self, cr, uid, ids, emp_id,context={}):\n employee_obj = self.pool.get('hr.employee')\n employee = employee_obj.browse(cr, uid, [emp_id], context=context)[0]\n\n if not emp_id:\n return {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the unidade_federativa_identidade of this AdicionalPersist. {{{adicional_persist_unidade_federativa_identidade_value}}}
def unidade_federativa_identidade(self): return self._unidade_federativa_identidade
[ "def vios_id(self):\n return self._get_val_int(_VADPT_LOCAL_ID)", "def getEdifici(self):\n lang = self.pref_lang()\n edifici = self._dadesUnitat['edifici_' + lang]\n return edifici", "def get_identificaties(self):\n query = \"\"\"\n select entity_id, field_identific...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the unidade_federativa_identidade of this AdicionalPersist. {{{adicional_persist_unidade_federativa_identidade_value}}}
def unidade_federativa_identidade(self, unidade_federativa_identidade): self._unidade_federativa_identidade = unidade_federativa_identidade
[ "def _set_udid(self, instance, vios_uuid, volume_id, udid):\n udid_key = self._build_udid_key(vios_uuid, volume_id)\n instance.system_metadata[udid_key] = udid", "def set_session_filming_invoice(session, filming_fee):\n session['cols_filming_invoice'] = filming_fee.id\n session.modifie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the data_emissao_identidade of this AdicionalPersist. {{{adicional_persist_data_emissao_identidade_value}}}
def data_emissao_identidade(self): return self._data_emissao_identidade
[ "def data_id(self) -> str:\n return self.entity_description.data_id", "def getEdifici(self):\n lang = self.pref_lang()\n edifici = self._dadesUnitat['edifici_' + lang]\n return edifici", "def get_id(self):\n return self[\"ds_id\"]", "def data_id(self) -> Optional[str]:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the data_emissao_identidade of this AdicionalPersist. {{{adicional_persist_data_emissao_identidade_value}}}
def data_emissao_identidade(self, data_emissao_identidade): self._data_emissao_identidade = data_emissao_identidade
[ "def data_id(self, data_id: str):\n\n self._data_id = data_id", "def setDataInregistrare(self, data):\n self.dataInregistrare = data", "def id_tipo_estabelecimento(self, id_tipo_estabelecimento):\n self._id_tipo_estabelecimento = id_tipo_estabelecimento", "def setDataIDAttribute(self, dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the id_estado_civil of this AdicionalPersist. {{{adicional_persist_id_estado_civil_value}}}
def id_estado_civil(self): return self._id_estado_civil
[ "def inativo(self):\n return self._inativo", "def vios_id(self):\n return self._get_val_int(_VADPT_LOCAL_ID)", "def id_tipo_estabelecimento(self):\n return self._id_tipo_estabelecimento", "def IaidValue(self):\n if self.force_auto_sync:\n self.get('IaidValue')\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the id_estado_civil of this AdicionalPersist. {{{adicional_persist_id_estado_civil_value}}}
def id_estado_civil(self, id_estado_civil): self._id_estado_civil = id_estado_civil
[ "def inativo(self, inativo):\n self._inativo = inativo", "def id_tipo_estabelecimento(self, id_tipo_estabelecimento):\n self._id_tipo_estabelecimento = id_tipo_estabelecimento", "def _set_ivid(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDyn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }