query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Responsible for setting the result of the 'start' (for this specific object) and waiting on the 'end' future associated with this object. This effectively coordinates putting one frame on the pulpit at a time.
async def schedule_frames(self): while True: print('starting wait in schedule_frames') self.executing_frame = await self.frame_queue.get() if self.executing_frame is None: #Just in case we want to stop the PulpitObject print("Got null frame. Stopping Pulpit")...
[ "def wait(self):\n self.phase1()\n self.phase2()", "def start(self):\n\n try:\n self.extend_start_position()\n except StartPointNotInRangeError:\n print('Staring point is not range (1m)')\n else: \n self._communicator.post_speed(0, self._TARGET_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produces the desired output based on the frame and detected objects within it
def produce_output(self, frame: ndarray, detected_objects: List[BoundingBox]) -> None: raise NotImplementedError
[ "def bounding_boxes(frame, output, args):\n width = int(frame.shape[1]) \n height = int(frame.shape[0])\n op_count = 0 # Number of objects detected in the frame\n \n for box in output: # Output is squeezed here\n output_id = box[0]\n label = box[1]\n conf = box[2]\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of nodes (tools) that have "filename" as an input
def _get_nodes_with_input(self, filename): tools = [] for tool in self.graph.nodes(): for varname in tool.__dict__: if varname.startswith(pyjobmanager.constants.INPUT): obj = tool.__dict__[varname] # can be a list or a string if obj.__...
[ "def _get_nodes_with_output(self, filename):\n tools = []\n for tool in self.graph.nodes():\n for varname in tool.__dict__:\n if varname.startswith(pyjobmanager.constants.OUTPUT):\n obj = tool.__dict__[varname] # can be a list or a string\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of nodes (tools) that have "filename" as an output
def _get_nodes_with_output(self, filename): tools = [] for tool in self.graph.nodes(): for varname in tool.__dict__: if varname.startswith(pyjobmanager.constants.OUTPUT): obj = tool.__dict__[varname] # can be a list or a string if obj ...
[ "def _get_nodes_with_input(self, filename):\n tools = []\n for tool in self.graph.nodes():\n for varname in tool.__dict__:\n if varname.startswith(pyjobmanager.constants.INPUT):\n obj = tool.__dict__[varname] # can be a list or a string\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write a dot file with the pipeline graph
def _write_dot(self): if self.dot_file: write_dot(self.graph, self.dot_file)
[ "def write_dot_file(self, out_file_path):\n nx.nx_agraph.write_dot(self, out_file_path)", "def _write_dot(cls, destination, meta_dependencies, meta_rev_dependencies):\n with open(destination, \"w\") as out:\n out.write(\"digraph G {\\n\")\n out.write(\" graph [ dpi = 75 ];\\n\")\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write scripts for all steps in pipeline
def _write_scripts(self): logger.debug("Writing scripts.") if not os.path.exists(self.scriptdir): logger.debug("Output directory " + self.scriptdir + " does not exist. Creating. ") os.makedirs(self.scriptdir) for job in self._get_ordered_jobs(): job.write_scr...
[ "def generate(rule, steps):\n # START OF YOUR CODE\n pass\n\n # END OF YOUR CODE", "def generate_scripts(nodes, ppn):\n args = parser.parse_args()\n\n cyclus_script = render_cyclus_script(out_type='h5',\n in_dir='.', out_dir='.', log_dir='.')\n\n pbs_script = render_pbs_script(nodes, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the scratch dir of every added job, while not overriding any manually set scratch dir (default). If 'override' is set, it will ignore a previously set scratch dir.
def _set_scratch(self, global_scratch, override=False): for job in self.graph.nodes(): if not job.scratch or override: job.scratch = global_scratch
[ "def scratch_directory(tmpdir):\n return tmpdir.mkdir(\"scratch\")", "def apply_overrides(override_folders, runtime_folder):\n for override_folder in parser.override_folders:\n if os.path.exists(override_folder):\n logging.info(\"adding ksappend override folder %s\", override_folder)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the market JSON data for the given page from our converter API
def get_json(page): url = 'https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order=market_cap_desc&per_page=100&page=' + str(page) + '&sparkline=false' response = http_req.get(url) return json.loads(response)
[ "def fetchJSON():\n\n curr_token_url = \"https://wowtokenprices.com/current_prices.json\"\n month_token_url = \"https://wowtokenprices.com/history_prices_30_day.json\"\n checkPages(curr_token_url, month_token_url)", "def _fetch_page(self, **params):\n r = requests.get(self.url, params=params)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method sends an SMS to a customer and notifies them
def send_sms( self, number: hug.types.text='+79994413746', content: hug.types.text="Your Order is ready", ): state = notifications_rpc.send_sms(number, content) return state
[ "def sendSMS(message):\n sns_client = boto3.client('sns', 'us-west-2')\n mobileNumber = getContactDetails()\n response = sns_client.publish(PhoneNumber=mobileNumber, Message=message)", "def send_sms(self, num, text):\n message = self.client.messages.create(\n body = text, # optional\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download mission to savepath.
def download(mission, savepath): # warning there is a deadlock, # never do mission.lock.acquire in callback... print("Start downloading " + mission.title) mission.state = "DOWNLOADING" try: crawl(mission, savepath) # Check if mission is complete for ep in mission.episodes: if not ep.complete and not ep....
[ "def download_mission():\n print(\" Download mission from vehicle\")\n missionlist = list()\n cmds = vehicle.commands\n cmds.download()\n cmds.wait_ready()\n for cmd in cmds:\n missionlist.append(cmd)\n return missionlist", "def download_mission():\n # print \" Download mission from...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build filename with page and ext
def filename(self, page, ext=""): if not isinstance(page, str): page = "{:03d}".format(page) page = self.escape(page) if self.noepfolder: return "{ep_title}_{page}{ext}".format( ep_title=self.ep_title, page=page, ext=ext ) return "{page}{ext}".format( page=page, ext=ext )
[ "def page_url_to_filename( url ):\n\n filename = url[ 30 : -1 ].replace('/page', '_page') + '.html'\n\n return filename", "def _make_filename(url):\r\n # This is super naive.\r\n # Todo: Make filename when the crawler return per site\r\n # Todo: Make random filename if needed\r\n fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if current page exists in savepath.
def exists(self, page): if page is None: return False # FIXME: if multiple SavePath is created and sharing same .parent(), # they should share the .files too. if self.files is None: self.files = {} def build_file_table(file): _dir, name = path_split(file) base, ext = splitext(name) ...
[ "def _is_current_page(self):\n self.selenium.wait_until_location_contains(\n \"/new\", timeout=60, message=\"Record view did not open in 1 min\"\n )\n self.selenium.location_should_contain(\n \"/lightning/o/Program__c/\",\n message=\"Section title is not 'New Pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grab HTML and images, and move the cursor to the correct image by skip_pages
def init_images(self, skip_pages=0): self.get_html() self.get_images() try: # skip some images for _ in range(0, skip_pages): next(self.images) # get current image self.image = Image.create(next(self.images)) except StopIteration: self.image = None
[ "def scanFullPage(driver, out):\n analysis = analyzePage(driver)\n dir_name = \"pieces\"\n try:\n os.mkdir(dir_name)\n except FileExistsError:\n pass\n for i in range(analysis.count):\n saveScreenshot(driver, out, dir_name, i)\n scrollY = (i + 1) * analysis.height\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loop the process until an error occurs. Errors are handled up to a limit.
def error_loop(process, handle_error=None, limit=10): errorcount = 0 while True: try: process() except Exception as er: # pylint: disable=broad-except traceback.print_exc() errorcount += 1 if errorcount >= limit: raise SkipEpisodeError(always=False) if handle_error: handle_error(er) # exc...
[ "def _collect_standard_error(self):\n while True:\n # Usually there should aways be a process\n if self._proc is not None:\n val = self._proc.stderr.readline()\n self._std_error.write(val)\n else:\n # Due to concurrency the process...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move to next item
def next(self): self.item = next(self.list)
[ "def nextItem(self):\n if self.item_count == 0: return\n \n prev_focus_index = self.focus_index \n while True:\n if self.focus_index < 0: self.focus_index = 0\n else: self.focus_index += 1\n\n if self.focus_index == prev_focus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a contrast matrix that is valid for a given design matrix.
def contrast_matrix(contrast, design_matrix): columns = design_matrix.columns.tolist() C = np.zeros(len(columns)) _, names, weights = contrast for name, weight in zip(names, weights): C[columns.index(name)] = weight return C
[ "def compliance_matrix(self):\n return self._material_law.compliance_matrix", "def calculate_contrast(range_matrix, domain_matrix):\n if range_matrix.length != domain_matrix.length or range_matrix.width != domain_matrix.width:\n raise BadComparisonError\n # sum_rd = 0\n # for count in xrang...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit OLS model and estimate residual autocorrelation with regularization.
def estimate_residual_autocorrelation(Y, X, tukey_m=None): from numpy.fft import fft, ifft # Fit initial iteration OLS model in one step B_ols, _, _, _ = np.linalg.lstsq(X, Y, rcond=None) Yhat_ols = X.dot(B_ols) resid_ols = Y - Yhat_ols # Compute empirical residual autocorrelation function ...
[ "def OLS_CV():\n N = [500, 5000]\n y_lim = [[0.15, 0.6], [0.26, 0.45]]\n repeat = 25\n sigma2 = 0.5\n model_ols = OLS()\n poly_deg_max = 9\n k = 5\n\n mse_train = np.zeros((repeat, poly_deg_max))\n mse_test = np.zeros((repeat, poly_deg_max))\n\n for n, limit in zip(N, y_lim): # calcul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default rule for choosing the Tukey taper window used by FSL.
def default_tukey_window(n): return int(np.floor(np.sqrt(n)))
[ "def weightedTweakUsing(*args, **kwargs):\n \n pass", "def weighting(window, cu=0.25):\n two_cu = cu * cu\n\n ci = variation(window, None)\n two_ci = ci * ci\n\n if not two_ci: # dirty patch to avoid zero division\n two_ci = 0.01\n\n divisor = 1.0 + two_cu\n\n if not divisor:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gamma-free column ordering. The lines of the matrix should be strongly chordally ordered.
def gamma_free_column_order(context_matrix): column_index_order = topological_sort(_column_intersection_graphs(context_matrix.matrix), lambda key: key) return _order_refinement(context_matrix.matrix, column_index_order)
[ "def order_matrix(mtr, n_column):\n mtr = sorted(mtr, key=lambda mtr: float(mtr[n_column]))\n return mtr", "def letters(self):\n l = [[\"x\" if row[i] else \" \" for i in range(len(row))] for row in self.grid]\n reversed_letters = [list(x) for x in zip(*l)]\n for i in range(0, 50, 5):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add new order ID to Logger
def record(self, order_id): self.order_ids[self.current_id] = order_id self.current_id = (self.current_id + 1) % len(self.order_ids)
[ "def record(self, order_id: T) -> None:\n\n self.order_ids.append(order_id)", "def trackOrder(self, orderid : int) -> str:\n\n success = False\n for order in self.orders:\n if order.order_id == orderid:\n print(\nf'Your order #{orderid} is sent to {order.location.cit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the index-th last order ID
def get_last(self, index): return self.order_ids[(self.current_id - index + len(self.order_ids) % len(self.order_ids))]
[ "def get_last(self, i: int) -> Optional[T]:\n\n if i >= len(self):\n return None\n\n return self.order_ids[-i - 1]", "def get_next_order_id(self):\n self.current_order_id += 1\n return self.current_order_id", "def get_last_id(self):\n\n return self._last_id", "def get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Init MultiYearNSRDB resource handler
def MultiYearNSRDBX_cls(): path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_*.h5') return MultiYearNSRDBX(path)
[ "def __init__(self, db_name='ASP20010'):\n self.db_name = db_name", "def __init__(self) -> None:\r\n self.db = Db()\r\n self.init_db()", "def __init__(self):\n self.db = self._read_db()\n self._setup_dirs()", "def _init_inner_db():\n db.create_all(bind=\"octopus_db\")", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test map data extraction for all gids
def test_full_map(NSRDBX_cls, ds_name='ghi', timestep='2012-07-04 12:00:00'): extract_map(NSRDBX_cls, ds_name, timestep) NSRDBX_cls.close()
[ "def test_gid_map():\n\n output_request = ('system_capacity', 'cf_mean', 'cf_profile',\n 'extra_unused_data', 'winddirection', 'ws_mean')\n with tempfile.TemporaryDirectory() as td:\n out_fpath1 = os.path.join(td, 'bespoke_out2.h5')\n out_fpath2 = os.path.join(td, 'bespoke_o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test site data extraction
def test_site(MultiFileNSRDBX_cls, ds_name='dni'): extract_site(MultiFileNSRDBX_cls, ds_name) MultiFileNSRDBX_cls.close()
[ "def test_scraping(self):\n self._scraper.scrape()", "def getTestingData(self):", "def test_get_data(self):\n\n\t\t# Test to go here when best approach is decided for making requests.", "def test_text_from_html(self):\n # Filtered list of words extracted from test html page 1\n test_page1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test region data extraction
def test_region(MultiFileNSRDBX_cls, ds_name='ghi', region='Clallam', region_col='county'): extract_region(MultiFileNSRDBX_cls, ds_name, region, region_col=region_col) MultiFileNSRDBX_cls.close()
[ "def extract_regions(self, text):\n pass", "def test_get_region_data_sources(data_region, sources_region, expected_region):\n shape = (8, 10)\n coordinates = vd.grid_coordinates(data_region, shape=shape)\n points = vd.grid_coordinates(sources_region, shape=shape)\n region = _get_region_data_sou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test map data extraction for all gids
def test_full_map(MultiFileNSRDBX_cls, ds_name='ghi', timestep='2018-07-04 12:00:00'): extract_map(MultiFileNSRDBX_cls, ds_name, timestep) MultiFileNSRDBX_cls.close()
[ "def test_gid_map():\n\n output_request = ('system_capacity', 'cf_mean', 'cf_profile',\n 'extra_unused_data', 'winddirection', 'ws_mean')\n with tempfile.TemporaryDirectory() as td:\n out_fpath1 = os.path.join(td, 'bespoke_out2.h5')\n out_fpath2 = os.path.join(td, 'bespoke_o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test region data extraction
def test_region(MultiFileWindX_cls, ds_name='windspeed_50m', region='Klamath', region_col='county'): extract_region(MultiFileWindX_cls, ds_name, region, region_col=region_col) MultiFileWindX_cls.close()
[ "def test_region(MultiFileNSRDBX_cls, ds_name='ghi', region='Clallam',\n region_col='county'):\n extract_region(MultiFileNSRDBX_cls, ds_name, region,\n region_col=region_col)\n MultiFileNSRDBX_cls.close()", "def extract_regions(self, text):\n pass", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test custom WaveX get methods for 4d 'directional_wave_spectrum' dataset
def test_WaveX(gid): path = os.path.join(TESTDATADIR, 'wave/test_virutal_buoy.h5') ds_name = 'directional_wave_spectrum' with WaveX(path) as f: truth = f[ds_name, :, :, :, gid] index = pd.MultiIndex.from_product( [f.time_index, f['frequency'], f['direction']], names=[...
[ "def test_obs_waveform_get(external_getter, code):\n net, sta, loc, cha = code.split('.')\n\n st = external_getter.obs_waveform_get(code)\n assert(len(st) == 3)\n\n stats = st.select(component=\"Z\")[0].stats\n assert stats.network == net\n assert stats.station == sta", "def get_terror_waves_inf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test save_region to .h5
def test_save_region(WindX_cls, datasets): region = 'Providence' region_col = 'county' gids = WindX_cls.region_gids(region, region_col=region_col) meta = WindX_cls.meta.loc[gids].reset_index(drop=True) meta.index.name = 'gid' truth = {'meta': meta, 'coordinates': WindX_cls.lat_lon[...
[ "def save_image_to_h5(image, h5_path, *args, **kwargs):\n # TODO: Implement the method\n\n raise NotImplementedError", "def save_to_hdf5(h5group, obj, path='/'):\n return Hdf5Saver(h5group).save(obj, path)", "def saveToHDF5(self,Hdf5Group):\n Hdf5Group.attrs['cutOff']=self.cutOff\n Hdf5Gr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the get raster meta index functionality. Plotting is the best way to sanity check this.
def test_get_raster_index(plot=False): res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5') # use a custom meta df because NSRDB/WTK resource test files are too small fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv') meta = pd.read_csv(fp) target = (16, -162) shape = (10, 5) ...
[ "def test_get_bad_raster_index():\n res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')\n\n # use a custom meta df because NSRDB/WTK resource test files are too small\n fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv')\n meta = pd.read_csv(fp)\n\n target = (-90, -162)\n shape = (1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the retrieval of a very large raster that needs to be chunked
def test_get_raster_index_big(plot=False): res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5') # use a custom meta df because NSRDB/WTK resource test files are too small fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv') meta = pd.read_csv(fp) target = (16, -163) shape = (50, 5...
[ "def test_get_more_tiles(self):\n get_map(2016, range(75078, 75080), range(74956, 74957), \".\")\n self.assertEqual(os.path.exists(\"../74956_75078.png\"), True)\n self.assertEqual(os.path.exists(\"../74956_75079.png\"), True)\n img1 = Image.open(\"../74956_75078.png\")\n img2 = I...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test retrieval of raster index on skewed data in RI
def test_get_raster_index_skewed(plot=False): res_fp = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5') target = (41.25, -71.8) shape = (5, 5) with WindX(res_fp) as ext: meta = ext.meta gid_target, vector_dx, vector_dy, close = \ ext.get_grid_vectors(target) _, s...
[ "def test_get_bad_raster_index():\n res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')\n\n # use a custom meta df because NSRDB/WTK resource test files are too small\n fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv')\n meta = pd.read_csv(fp)\n\n target = (-90, -162)\n shape = (1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the raster retrieval on the NSRDB meta data which is sorted differently than wtk.
def test_get_raster_nsrdb(plot=False): res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5') # use a custom meta df because NSRDB/WTK resource test files are too small fp = os.path.join(TESTDATADIR, 'nsrdb/ri_full_meta.csv') meta = pd.read_csv(fp) target = (41.45, -71.74) shape = (1...
[ "def test_get_bad_raster_index():\n res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')\n\n # use a custom meta df because NSRDB/WTK resource test files are too small\n fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv')\n meta = pd.read_csv(fp)\n\n target = (-90, -162)\n shape = (1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the get raster meta index functionality with a bad target input
def test_get_bad_raster_index(): res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5') # use a custom meta df because NSRDB/WTK resource test files are too small fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv') meta = pd.read_csv(fp) target = (-90, -162) shape = (10, 5) wit...
[ "def testAccessIncorrectIndex(self):\n self.assertRaises(ValueError,\n self.manager.ifDescr.__getitem__, (47, 18))\n self.assertRaises(ValueError,\n self.manager.ifDescr.__getitem__, \"nothing\")", "def test_inexistent_tile(self):\n self.asser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print out data keys in event descriptor.
def print_data_keys(schemas: dict): data_keys: dict = schemas[DocumentNames.descriptor]['properties']['data_keys'] for k, v in data_keys.get("properties", {}).items(): print("{}: {}".format(k, v.get("description", "")))
[ "def showdata(self):\n print self.data", "def print_data_headers(self):\n for header in self.header_dict.keys():\n print header", "def printEventInfo(self):\n\n print self.eventType + ' - ' + conDateNumToDateStr(self.numDate)", "def print_possible_events():\n print(\"Registere...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that determines if an object is an iterable, not including str.
def isiterable(obj): if isinstance(obj, str): return False else: return isinstance(obj, Iterable)
[ "def is_iterable(obj):\n # Speed: do not use iter() although it's more robust, see also https://stackoverflow.com/questions/1952464/\n return isinstance(obj, Iterable) and not isinstance(obj, (str, bytes))", "def is_non_string_iterable(obj):\n return not isinstance(obj, str) and isinstance(obj, Iterable)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively iterate through values in nested iterables, and return a flattened list of the inputted iterable.
def flatten(inp_iter): def inner(inp): for val in inp: if isiterable(val): for ival in inner(val): yield ival else: yield val return list(inner(inp_iter))
[ "def flatten(iter):\n out = []\n for x in iter:\n if not x:\n continue\n if isinstance(x, (list, tuple, set)):\n out += flatten(x)\n else:\n out.append(x)\n return out", "def flatten(iterable:Iterable) -> generator:\r\n consumable = regurge(ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turns the input hparams into a standardized experiment name
def name_from_hparams(hparams, short=False): if short: list_name = flatten([ hparams.exp_prefix, hparams.dataset, hparams.model, 'seed' if hparams.seed else '', '' if not hparams.seed else hparams.seed, hparams.exp_suffix, ]) ...
[ "def update_experiment_name(self):\n if self.echo:\n self.experiment_name = self.experiment_name.replace('Ramsey', 'Echo')", "def get_model_name(args):\n hiddensizes = '_' + str(args.D_h_features)\n model_name = const.MODEL_DIRECTORY + str(args.id) + '_model' + hiddensizes + '.pth'\n an...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given an attempted move, parse the input into a "move list"
>>> parse_move("A 3")
[0, 3]
>>> parse_move("this isn't valid")
None
def parse_move(s):
[ "def validate_move(state, move):\n N = len(state)\n assert move.isdigit(), \"Move must be numeric (base 10)\"\n move = int(move)\n assert 0 < move <= N ** 2, f\"Move must be between 1 and {N ** 2} inclusive\"\n x, y = index(move, N=N)\n assert state[x][y] is None, \"This position is already marked...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Return list with present environments. This corresponds to the text between ``\begin{...}`` and ``\end{...}``.
def environments(text: str) -> list[str]: ret = [] curly_braces = find_matching(text, "{", "}", ignore_escaped=True) for i in re.finditer(r"\\begin{.*}", text): opening = i.span(0)[0] + 6 closing = curly_braces[opening] i = opening + 1 ret += [text[i:closing]] return l...
[ "def environments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"environments\")", "def environment_list(self):\n from .file import File\n\n environments_list = []\n boards = self.boards_list()\n environments = self.get_selected_boards(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
``.rstrip()`` for each line.
def _rstrip_lines(text: str) -> str: return "\n".join([line.rstrip() for line in text.splitlines()])
[ "def fix_whitespace(self):\n lines = []\n blank = False\n for line in self.lines:\n line = line.rstrip()\n if line:\n if blank and lines:\n lines.append('\\n')\n lines.append(line + '\\n')\n blank = False\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Align ``&`` and ``\\`` of all lines that contain those alignment characters.
def _align(text: str, align: str = "<", maxwidth: int = 100) -> str: lines = [line.strip() for line in text.strip().splitlines()] width = [] if len(lines) <= 3: return "\n".join(lines) for i in range(1, len(lines) - 1): # split at & and \\, and strip all spaces around line = r...
[ "def alignment(self):\n\n self.matches = \"\"\n\n for i in range(len(self.x_align)):\n\n if self.x_align[i] == self.y_align[i]:\n\n self.matches += \"|\"\n\n self.edges += 1\n\n else:\n\n self.matches += \" \"", "def format_alignment...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check per character if it is a placeholder.
def _is_placeholder(text: str, placeholders: list[Placeholder]) -> list[bool]: search_placeholder = list(set(list({i.search_placeholder for i in placeholders}))) ret = {} for search in search_placeholder: if search is None: continue indices = {text[i.span()[0] : i.span()[1]]: ...
[ "def test_is_placeholder(placeholder, expected):\n assert templates_utils.is_placeholder(placeholder=placeholder) == expected", "def match_placeholder_shown(self, el: bs4.Tag) -> bool:\n\n match = False\n content = self.get_text(el)\n if content in ('', '\\n'):\n match = True\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Extract the keys of 'float' commands (e.g. ``\includegraphics{...}``, ``\bibliography{...}``) and reconstruct their filenames. This operation is readonly.
def float_filenames(self, cmd: str = r"\includegraphics") -> list[tuple[str]]: assert self.dirname is not None # mimic the LaTeX behaviour where an extension is automatically added to a # file-name without any extension def filename(dirname, name): if os.path.isfile(os.path...
[ "def rename_float(self, old: str, new: str, cmd: str = r\"\\includegraphics\"):\n\n text = self.main.split(cmd)\n\n for i in range(1, len(text)):\n pre, key = text[i].split(\"{\", 1)\n key, post = key.split(\"}\", 1)\n if key != old:\n continue\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Rename a key of a 'float' command (e.g. ``\includegraphics{...}``, ``\bibliography{...}``). This changes the TeX file.
def rename_float(self, old: str, new: str, cmd: str = r"\includegraphics"): text = self.main.split(cmd) for i in range(1, len(text)): pre, key = text[i].split("{", 1) key, post = key.split("}", 1) if key != old: continue if text[i][0] not...
[ "def set_float(self, key: str, value: float):\n self.set_str(key, f\"{float(value):.{Config.FloatPrecision}f}\")", "def RenameKey(self, old_key, new_key):\n\n # Rename header's key\n i = self.header.index(old_key)\n self.header[i] = new_key\n\n # Rename key in data structure\n for i, item in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Read the citation keys in the TeX file (keys in ``\cite{...}``, ``\citet{...}``, ``\citep{...}``).
def citation_keys(self) -> list[str]: curly_braces = find_matching(self.main, "{", "}", ignore_escaped=True) cite = [] for i in re.finditer(r"(\\cite)([pt])?(\[.*\]\[.*\])?(\{)", self.main): o = i.span()[1] c = curly_braces[o - 1] cite += list(filter(None, s...
[ "def ConvertCiteKeys(line):\n line = re.sub(r'([A-Z][a-z]+:\\d{4}[a-z]{2})\\}\\{([A-Z][a-z]+:\\d{4}[a-z]{2})', r'\\1\\2', line)\n \n \"\"\"This regex will add \\citep in front of Papers citekeys (It will fail if suffixes or \n prefixes are included. I need to add functionality for this)\"\"\"\n line = re.sub(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove lines that are entirely a comment.
def remove_commentlines(self): tmp = self.main.splitlines() tmp = list(itertools.filterfalse(re.compile(r"^\s*%.*$").match, tmp)) self.main = "\n".join(tmp)
[ "def strip_comments(text):\n \n # (m?) enables multiline mode\n return re.sub(r'(?m)^ *#.*\\n?', '', text).strip()", "def strip_comment(line):\n tokens = []\n try:\n for tok in py_tokenize.generate_tokens(StringIO(line).readline):\n token = Token(tok)\n if token.is_comm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove comments from the main text.
def remove_comments(self): self.main = remove_comments(self.main)
[ "def remove_commentlines(self):\n\n tmp = self.main.splitlines()\n tmp = list(itertools.filterfalse(re.compile(r\"^\\s*%.*$\").match, tmp))\n self.main = \"\\n\".join(tmp)", "def remove_comments(text):\n return re.sub(r' //.*\\n', r'', text)", "def strip_comments(text):\n \n # (m?...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Change label in ``\label{...}`` and ``\ref{...}`` (like) commands.
def change_label(self, old_label: str, new_label: str, overwrite: bool = False): if old_label == new_label: return if not overwrite: if new_label in self.labels(): raise OSError(f'Label "{new_label:s}" already exists') old = re.escape(old_label) ...
[ "def set_label(self, label):", "def update_label():\n \n # add code here to update the label_var variable (which is displayed in our label)", "def setLabel(self, lb: str):\n self.instrParts = (lb,) + self.instrParts[1:]", "def write_label(self, label):\n self._write_asm_commands(['({})'.fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Limit a BibTeX file to a list of keys.
def bib_select(text: str, keys: list[str]) -> str: text = "\n" + text bib = list(filter(None, text.split("@")))[1:] out = [] for i in bib: if re.match(r"(string\{)(.*)", i): continue if re.match(r"(Comment\ )(.*)", i, re.IGNORECASE): continue if re.mat...
[ "def fix_bibtex4publish():\n if len(sys.argv) < 1:\n _usage_makefile()\n sys.exit(1)\n\n bibfiles = sys.argv[1:]\n for bibfile in bibfiles:\n if not bibfile.endswith('.bib'):\n print bibfile, 'is not a BibTeX file'\n _abort()\n shutil.copy(bibfile, bibfile ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indent TeX file, see ``help``.
def texindent_cli(args: list[str]): parser = _texindent_parser() args = parser.parse_args(args) assert all([os.path.isfile(file) for file in args.files]) for filepath in args.files: filepath = pathlib.Path(filepath) orig = filepath.read_text() tex = TeX(orig) tex.pream...
[ "def indent():\n\timport subprocess\n\tsubprocess.Popen('vim +\"normal! gg=G\" +wqa ' + output_path, shell=True)", "def _indent(self, dedent=True):\n num_newlines = self._get_cursor().selectedText().count(u\"\\u2029\")\n save_cur = self._get_cursor()\n cur = self._get_cursor()\n\n # mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that dnsmasq is installed
def test_dnsmasq_is_installed(host): assert host.package("dnsmasq").is_installed
[ "def do_dns_check(self):\n\n print \"\\nPerforming DNS queries against dnsmasq...\\n\"\n\n dns_resolver = resolver.Resolver(configure=False)\n dns_resolver.nameservers.append(self.dns_host_ip)\n\n # Set dns_check to 1 (good) by default\n dns_check = 1\n\n name_to_resolve = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that haproxy is installed
def test_haproxy_is_installed(host): assert host.package("haproxy").is_installed
[ "def test_dnsmasq_is_installed(host):\n assert host.package(\"dnsmasq\").is_installed", "def test_packages(host):\n\n assert host.package('curl').is_installed", "def _check_host(self):\n if not self.available:\n _LOGGER.error(\"No HassOS availabe\")\n raise HassioNotSupportedE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Infer a key from the parts.
def inferKey(score): # (1) Find the keys of each part. try: allPartKeys = findPartKeys(score) except KeyFinderError as kfe: kfe.logerror() return False # (2) And then find the keys of the score. try: key = findScoreKeys(score) except KeyFinderError as kfe: ...
[ "def normalize_key(key: Any):", "def normalize_key(key):\n\n if isinstance(key, str):\n group, _, key = key.partition(\".\")\n elif isinstance(key, tuple):\n group, key = key\n else:\n raise TypeError(f\"invalid key type: {type(key).__class__}\")\n return group, key or None", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a default domain from available arguments. Migrated from clientmanager.setup_auth().
def _auth_default_domain(self, config): identity_version = config.get('identity_api_version', '') auth_type = config.get('auth_type', None) # TODO(mordred): This is a usability improvement that's broadly useful # We should port it back up into os-client-config. default_domain =...
[ "def _auth_default_domain(self, config):\n\n identity_version = str(config.get('identity_api_version', ''))\n auth_type = config.get('auth_type', None)\n\n # TODO(mordred): This is a usability improvement that's broadly useful\n # We should port it back up into os-client-config.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get auth plugin and validate args
def load_auth_plugin(self, config): loader = self._get_auth_loader(config) config = self._validate_auth(config, loader) auth_plugin = loader.load_from_options(**config['auth']) return auth_plugin
[ "def _validate_auth(self, config, loader, fixed_argparse=None):\n # May throw a keystoneauth1.exceptions.NoMatchingPlugin\n\n plugin_options = loader.get_options()\n\n msgs = []\n prompt_options = []\n for p_opt in plugin_options:\n # if it's in config, win, move it and...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate auth plugin arguments
def _validate_auth(self, config, loader, fixed_argparse=None): # May throw a keystoneauth1.exceptions.NoMatchingPlugin plugin_options = loader.get_options() msgs = [] prompt_options = [] for p_opt in plugin_options: # if it's in config, win, move it and kill it from...
[ "def _validate_auth(self, config, loader, fixed_argparse=None):\n # May throw a keystoneauth1.exceptions.NoMatchingPlugin\n\n plugin_options = loader.get_options()\n\n msgs = []\n prompt_options = []\n for p_opt in plugin_options:\n # if it's in config, win, move it and...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload a new entity to Wikipedia
def upload_entity(auth: OAuth1, entity): return api_call(auth, 'wbeditentity', { 'new': 'item', 'data': json.dumps(entity), })
[ "def save_article_in_db(driver: WebDriver, article_title: str):\n article_content = driver.find_element_by_class_name(\"post-content\").text\n article_author = driver.find_element_by_class_name(\"author-name\").text\n new_article = Article(\n article_author=article_author,\n article_title=art...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses attachment details from the raw attachments response.
def parse_attachments_list(raw_attachments): attachments_regex = re.compile(r'(\d+): (.+) \((.+) \/ (.+)\)') attachments_list = attachments_regex.findall(raw_attachments) return attachments_list
[ "def parse_attachment_content(attachment_id, raw_attachment_content):\n # type: (str, str) -> str\n attachment_content_pattern = re.compile(r'Content: (.*)', flags=re.DOTALL)\n attachment_content = attachment_content_pattern.findall(raw_attachment_content)\n if not attachment_content:\n return_er...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses raw attachment response into the attachment content
def parse_attachment_content(attachment_id, raw_attachment_content): # type: (str, str) -> str attachment_content_pattern = re.compile(r'Content: (.*)', flags=re.DOTALL) attachment_content = attachment_content_pattern.findall(raw_attachment_content) if not attachment_content: return_error('Could...
[ "def process_raw_email(raw, include_headers):\n message = email.message_from_string(raw)\n mailheaders = Parser().parsestr(raw, True)\n body = ''\n other_headers = '\\n'.join(\n [\"%s: %s\" % (k, getheader(v)) for k, v in mailheaders.items() if k not in ('Date', 'Message-ID', 'From', 'To', 'Subje...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accepts ticket ID and history ID as input and returns a dictionary of ticket history entry properties
def get_ticket_history_by_id(ticket_id, history_id): suffix_url = 'ticket/{}/history/id/{}'.format(ticket_id, history_id) raw_history = http_request('GET', suffix_url) return parse_history_response(raw_history.text)
[ "def _get_ticket_history(self, rt, requesting_username, ticket_id):\n ticket_history = rt.getTicketHistory(ticket_id)\n ticket_history = list(filter(lambda h: h['Type'] in ALLOWED_HISTORY_TYPES, ticket_history))\n for entry in ticket_history:\n if entry['Type'] == \"Status\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses raw history string into dict
def parse_history_response(raw_history): # type: (str) -> dict keys = re.findall(r'^([a-z|A-Z]+):', raw_history, flags=re.MULTILINE) values = re.split(r'\n[a-z|A-Z]+:', raw_history)[1:] if len(keys) != len(values): return {} current_history_context = {key.upper() if key == 'id' else key: val...
[ "def logline_to_dict(logline: str) -> dict:\n data = {}\n pairs = re.split('(?<!:):(?!:)', logline.strip().strip('\\0'))\n for p in pairs:\n p = p.replace('::',':')\n keyval = p.split('=')\n try:\n data[keyval[0]] = keyval[1]\n except IndexError as e:\n log...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the link IDs from the ticket link response
def parse_ticket_links(raw_links): # type: (str) -> list links = [{'ID': link} for link in re.findall(r'/ticket/(\d+)', raw_links)] if raw_links else [] return links
[ "def extract_urls(response):\n\n # for testing\n # print(len(response[0].get(\"items\")))\n # print(response[0].get(\"items\")[0].get(\"link\"))\n\n if response[0].get(\"items\") is not None:\n # items list contains any items\n url_list = [item.get(\"link\") for item in response[0].get(\"i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Integrate the snapshot as a set of test particles in an external potential
def _integrate_test_particle(self,t,pot): #Integrate all the orbits for o in self.orbits: o.integrate(t,pot) #Return them as a set of snapshots out= [] for ii in range(len(t)): outOrbits= [] for o in self.orbits: outOrbits.appen...
[ "def sample_efield(self):\n efield = self.efield\n ngs = self.ngs\n cv = 1e-5\n vacuum = []\n non_vacuum = []\n surface = []\n vac_ijk = []\n nvac_ijk = pd.DataFrame()\n surf_ijk = []\n\n v = 0 \n n = 0\n vc = 0\n nc = 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input tensor (e.g., the outputs of a LSTM), do mean pooling over the last dimension of the input. For example, if the input was the output of a LSTM of shape (batch_size, sequence length, hidden_dim), this would calculate a mean pooling over the last dimension (taking the padding into account, if provided) to ...
def mean_pool(input_tensor, sequence_length=None): with tf.name_scope("mean_pool"): # shape (batch_size, sequence_length) input_tensor_sum = tf.reduce_sum(input_tensor, axis=-2) # If sequence_length is None, divide by the sequence length # as indicated by the input tensor. i...
[ "def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):\n size_x, size_y = size\n stride_x, stride_y = stride\n o = tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding,\n name=name)\n print(\"After \" ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the outputs of a LSTM, get the last relevant output that is not padding. We assume that the last 2 dimensions of the input represent (sequence_length, hidden_size).
def last_relevant_output(output, sequence_length): with tf.name_scope("last_relevant_output"): batch_size = tf.shape(output)[0] max_length = tf.shape(output)[-2] out_size = int(output.get_shape()[-1]) index = tf.range(0, batch_size) * max_length + (sequence_length - 1) flat =...
[ "def get_last_step(inputs: tf.Tensor, seq_length: tf.Tensor) -> tf.Tensor:\n batch_range = tf.range(tf.shape(seq_length)[0])\n\n non_empty_seq = tf.sign(seq_length)\n safe_indices = tf.cast((seq_length - non_empty_seq), dtype=tf.int32)\n indices = tf.stack([batch_range, safe_indices], axis=1)\n result = tf.gat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List top game titles by viewer count
async def top(self, ctx): if ctx.invoked_subcommand is None: data = await twitch_api_call(ctx, endpoint='games/top', channel='', params='?first=10') games = [] for count, game in enumerate(data['data'], start=1): game = game['name'] game = tex...
[ "def display_top_games(collection, count, detailed):\n if detailed:\n print(f\"{'Rank':<6}{'Rating':<8}{'Weighted':<11}{'Plays':<7}\" \\\n f\"{'Last Played':<13}{'Game':<100}\")\n else:\n print(f\"{'Rank':<5}{'Game':<100}\")\n rank = 1\n rgx = re.compile('[%s]' % 'b\\'\\\"')\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Breaks up a unified numpy storage into one numpy storage per child
def numpy_shatter(self): assert self.storage == "numpy" parent = self._parent() if parent is not None and parent.storage == "numpy": parent.numpy_shatter() data = {} children = {} for prop in self._props: p = self._props[prop] if p["ele...
[ "def __build_storage(self):\n try:\n tmp_storer = tables.open_file(\n self._hdf5_filepath, mode=\"w\", title=self._storage_name\n )\n\n tmp_storer.create_group(\"/\", \"tre\", title=\"root-tree\")\n tmp_storer.create_group(\"/tre\", \"master\", title...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests for token refresh view
def test_RefreshTokensView(self): # Creating the default user User.objects.create_user(email='testuser1@gmail.com', password='password') # Executing all the requests for x in self.REQUESTS['RefreshTokenView']['tests']: request = self.client.post( self.REQUES...
[ "def test_get_new_access_token(self):\n # get new access token and confirm side effects\n self.authorizer._get_new_access_token()\n self.on_refresh.assert_called_once()\n self.assertNotEqual(self.access_token, self.authorizer.access_token)\n\n # confirm AuthClient is still usable ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary with all the docs available in the data folder. Maps the rg_number from the database to the file name
def getDocs(): docs = dict() double_transcripts=0 for file in glob.glob(INPUT_FOLDER+"*"): # get RG number rg_number = file.split('/')[-1].split("_")[0] # find last occurrence of '.' and replace it with '*' k = rg_number.rfind(".") mongo_rg = rg_number[:k] + "*" + r...
[ "def get_docs():\n fields = ['Title', 'Description', 'OperatingSystem']\n all_docs = {}\n\n connection = Connection()\n db = connection.linux_laptops\n docs = db.docs\n\n for f in fields:\n all_docs.update(_get_docs(docs, f))\n return all_docs", "def get_documents(file_type: str, args)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Goes over all the transcripts in the data folder and creates a tracker for all the interviews
def createTracker(): # query for interview ids result = h.query(DB, INPUT_COLLECTION, {}, {'id':1, 'rg_number': 1, 'fnd_doc_filename': 1} ) docs = getDocs() for interview in result: # instantiate document to be inserted and get rg_number document = dict() rg_number = intervi...
[ "def generate_timit_data():\n root = 'LibriSpeech'\n\n for subdir, dirs, files in os.walk(root):\n for file in files:\n if \"txt\" in file:\n os.system(\"mv {} ./data/transcripts/\".format(os.path.join(subdir, file)))\n elif \"flac\" in file:\n durati...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a dictionary of scans for use in other test functions
def load_scans(start,stop): scansin=collections.OrderedDict() try: for idx in range(start,stop+1): scansin[idx]=npyio.load_data(os.path.join(os.path.dirname(__file__),'filetypes/HB1A/HB1A_exp0718_scan0%d.dat'%idx)) return scansin except: pytest.fail('scan load failed in scan col...
[ "def getAnalysesDict(self):\n results = self._resultDict\n results['bug_found'] = self.foundBug\n results['failed'] = self.failed\n results['timeout_hit'] = self.ranOutOfTime\n\n\n return results", "def test_callsites_pruebas(setup):\n assert setup.assign_call_find() == {('generator',): [41, 43...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialisation function. It sets the player ID variable to be used later by other functions, as well as changing the directory to the appropriate player.
def __init__(self, playerId): self.loc = os.getcwd() os.chdir(self.loc+"/static/json/"+str(playerId)) self.playerId = playerId
[ "def init_new_player(player):\r\n # the FIRST_LOGIN flags are necessary for the system to call\r\n # the relevant first-login hooks.\r\n #if player.character:\r\n # player.character.db.FIRST_LOGIN = True\r\n player.db.FIRST_LOGIN = True", "def __init_player(self, name, client_id):\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produces a list of all the matches that a player has data for.
def getAllPlayerMatches(self): os.chdir(self.loc+"/static/json/"+str(self.playerId)+"/matchData") amount = glob.glob("*") temp = [] if len(amount) > 1: for items in amount: temp.append(dict(json.load(open(items, "r")))) os.chdir(self.loc) ...
[ "def get_all_match_for_player_id(player_id):\r\n ps = tables.players_stats.as_('ps')\r\n m = tables.matches.as_('m')\r\n\r\n q = Query.from_(ps\r\n ).join(m, enums.JoinType.inner).on(m.match_id == ps.match_id\r\n ).select(\r\n m.match_id.as_('match_id'),\r\n ps.sc_player...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a definition of an autoencoder (including the size of the hidden and visible layers and the theta parameters) and an input data matrix (each column is an image patch, with 1 or more columns), compute the feedforward activation for the output visible layer for each data column, and return an output activation matr...
def autoencoder_feedforward(theta, visible_size, hidden_size, data): ### YOUR CODE HERE ### # theta is an array with order [{W(1)}, {W(2)}, {b(1)}, {b(2)}] # in W, ROWS INDICATE "TO" NODES AND COLUMNS INDICATE "FROM" NODES # Pull values from theta vector and reshape: W1 = theta[0:(hidden_size * vis...
[ "def calc_activation(self, inp):\n inp_rightform = ny.matrix( inp ).T\n self.a = [inp_rightform]\n tmp = ny.dot( self.weights_layer[0], inp_rightform ) + self.bias[0]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n\n for i in range(self.number_hidden_layers-1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a helper function to streamline saving the results of an autoencoder. The visible_size and hidden_size provide the information needed to retrieve the autoencoder parameters (w1, w2, b1, b2) from theta.
def plot_and_save_results(theta, visible_size, hidden_size, root_filepath=None, train_patches=None, test_patches=None, show_p=False, **params): filepath = 'model' if root_filepath: filepath = root_filepath + '_' + filepath save_model(theta, visibl...
[ "def save_params(self):\n self.autoencoder.save_parameters('/Users/wenqin/Documents/GitHub/grade-12-assignments-wenqinYe/Culminating/parameters/encoder')", "def autoencoder_feedforward(theta, visible_size, hidden_size, data):\n\n ### YOUR CODE HERE ###\n # theta is an array with order [{W(1)}, {W(2)}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the given product has at least one of the selected categories of the criterion.
def isValid(self, product): cm = ICategoryManagement(product) product_categories = ["/".join(c.getPhysicalPath()) for c in cm.getTopLevelCategories()] criteria_categories = self.context.getCategories() for criteria_category in criteria_categories: if criteria_category in pro...
[ "def is_satisfied(self, item: Product) -> bool:\n return all(spec.is_satisfied(item) for spec in self.specs)", "def has_category(item):\n return item in DataHelper.item2category", "def has_a_product(obj):\n return \"products\" in obj and len(obj[\"products\"]) > 0", "def is_product_level_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pass in two args and a test; if arg1 != test, return arg2.
def three_arg_sel(arg1: str, arg2: str, test: str) -> str: return arg2 if arg1 == test else arg1
[ "def test_two_args(self):\n self.assertTrue(is_testable(), NOT_TESTABLE)\n try:\n subprocess.check_call(['./Act2', 'spam', 'eggs'])\n except subprocess.CalledProcessError as e:\n if e.returncode == 17:\n return\n self.fail('\"./Act2 spam eggs\" did no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks path for existence and permissions and tries to make the path.
def path_set(path: Path) -> bool: tryagain = True access: int = 0o755 if path.exists(): tryagain = True else: md = su.get_new_path(path) if md.lower() == 'yes' or 'y': try: path.mkdir(mode=access) except OSError as error: lo...
[ "def _check_and_create(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def _create_path_ignore_existing(self, path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n # File exists (17) is okay\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print ID3 tags in plain English. One per line.
def print_tags(self, verbose=False): # import pdb; pdb.set_trace() for tag in self.audio: if tag in _tag_types: # stdout.write to suppress newline. sys.stdout.write(_tag_types[tag] + ': ') print self.audio[tag] elif verbose: ...
[ "def showTextwithTag(l):\n \n doc=l[0] #the nlp doc\n options=l[1] #displacy options\n return displacy.render(doc, style='ent', options=options)", "def print_tags_raw(self):\n print self.audio.pprint()", "def tag_file_id3(podcast_sendung):\n lib_cm.message_write_to_console(ac, u\"tag files...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print raw ID3 tags. One per line.
def print_tags_raw(self): print self.audio.pprint()
[ "def print_tags(self, verbose=False):\n# import pdb; pdb.set_trace()\n for tag in self.audio:\n if tag in _tag_types:\n # stdout.write to suppress newline.\n sys.stdout.write(_tag_types[tag] + ': ')\n print self.audio[tag]\n elif verbo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
City constructor. name must not be zero length
def __init__(self, subdivision: str, name: str, id=None): super().__init__() # Test case for name length if len(name) == 0: raise LengthError('City name length is 0') if id is not None: self.id = id sel...
[ "def __init__(self, city: str, postoffice: int):\n self.city = city\n self.postoffice = postoffice", "def city(name):\n global city\n import ephem.cities\n city = ephem.cities.city\n return city(name)", "def initializeCity(self):\n\n title=self._city_obj.getName()\n price...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a string representation of the City object.
def __repr__(self): if self.id is None: return '<City: subdivision: {subdivision}\n' + \ ' name: {name}>'.format( subdivision=str(self.subdivision), name=self.name ) else: return '<City: id:...
[ "def get_city_info_str(self):\n info_list = textwrap.wrap(self.description)\n return ' ' + '\\n '.join(info_list)", "def test_str_City(self):\n kansas = City()\n string = \"[City] ({}) {}\".format(kansas.id, kansas.__dict__)\n self.assertEqual(string, str(kansas))", "def l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the position of the GUI when the camera moves
def update_gui_position(self, offset=0): self.score_text.center_x += CAMERA_SPEED self.score_nb_text.center_x += CAMERA_SPEED self.game_over_gui.center_x += CAMERA_SPEED
[ "def positionCamOnTrack(self):\n xPos = cmds.getAttr(self._startingTrack+\".translateX\")\n cmds.setAttr(self._mainCam[0]+\".translateX\", xPos)\n\n zPos = cmds.getAttr(self._startingTrack+\".translateZ\")\n cmds.setAttr(self._mainCam[0]+\".translateZ\", zPos)\n\n cmds.setAttr(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the current index of the tab in the main tab bar. Raises IndexError if the tab does not exist in the bar.
def getCurrentIndex(self): for i in range(MpGlobal.Window.tabMain.count()): widget = MpGlobal.Window.tabMain.widget( i ) if widget == self: return i raise IndexError("Tab not in TabBar. index out of range.")
[ "def _get_index(self) -> \"size_t\" :\n return _core.ToolbarTab__get_index(self)", "def selectTab(self, pos):\n _tabbar = self.tabBar()\n for index in range(_tabbar.count()):\n rect = _tabbar.tabRect(index)\n if rect.contains(pos):\n return index\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate random latitude and longitude points from a given center (just to simulate ;) )
def random_point_generator(self,num_points): origin_point = (-12.0432,-77.0141) latitude = [] longitude = [] for row in range(num_points): temp = float(rnd.randint(0,100)) latitude.append(origin_point[0] + rnd.random()/100) longitude.append(origin_poi...
[ "def generate_random_coordinate(min_lat=-90,max_lat=90,min_lon=-180,max_lon=180,precision=6,seed=None):\n \n if(isinstance(seed,int)):\n random.seed(seed)\n \n latitude = round(random.uniform(min_lat,max_lat),precision)\n \n longitude = round(random.uniform(min_lon,max_lon),precision)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate random daily demands for the needed distribution points. Params: num_points — number of distribution points.
def generate_demand(self,num_points): latitude,longitude = self.random_point_generator(num_points) demand = np.array([np.random.randint(10,100) for observation in range(num_points)]) return latitude, longitude, demand
[ "def random_date_generator(start_date):\n\n\t\trange_in_days = current_date + np.timedelta64(-T, \"D\") - np.datetime64(start_date)\n\t\tdays_to_add = np.arange(1, range_in_days-1)\n\t\trandom_date = np.datetime64(start_date) + np.random.choice(days_to_add, n, replace=False)\n\t\treturn random_date", "def gen_dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the companies belonging to a company group.
def company_group_companies_get(self, company_group_id=None, account=None): self.init.authHeader(account) data = { "op_code": "get", "get_data": { "company_group_id": company_group_id } } resp = self.init.request('post', "/company-group...
[ "def company_chcek(self):\n ids = []\n for company in self.env['res.company'].search([('id', '!=', self.company_id.id)]):\n ids.append(company.id)\n return {\n 'domain': {\n 'new_company_id': [('id', 'in', ids)]\n }\n }", "def mongo_show_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the students wanting this package (according to wish_id) that aren't assigned yet.
def get_unassigned_wishers(self, package_number, wish_id): # get Package object package = self.packages[package_number] # get all students having this package as their wish (according to wish_id) that aren't assigned return [wisher for wisher in package.wishers[wish_id] if wisher not...
[ "def assign_all_uncleanly(self):\r\n # go through all unassigned students\r\n for wish_id in range(self.amount_wishes):\r\n for student_number in self.students.keys():\r\n if student_number in self.assigned_students.keys():\r\n # this student is already ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a student to a package.
def assign(self, student_number, package_number): if student_number in self.assigned_students.keys() or package_number in self.assigned_students.values(): raise ValueError("Trying to assign an already assigned student or an already assigned package!") self.assigned_students[student_number...
[ "def set_student_attributes(s, t):\n l = len(list_students)+1 #!helper\n s.gpa = random.randint(1, 100)\n s.grade_level = t.grade_level #!not DRY\n s.name = ('Student'+ str(l+1) + '_G_' + str(s.grade_level)) #come up with a better naming\n s.current_teacher = t\n # print('This is the new Student:'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursive function (calling resolve_after_assignment). Check whether this package is wanted by only one student (according to wish_id) and, if so, assign it. Return True when the package is wanted by multiple students, else False.
def assign_package_if_possible(self, package_number, wish_id): # get unassigned wishers unassigned_wishers = self.get_unassigned_wishers(package_number, wish_id) # when only a single student wants this package, they get it if len(unassigned_wishers) == 1: this_student_nu...
[ "def resolve_after_assignment(self, student_number, wish_id):\r\n # when this wish doesn't exists\r\n if wish_id < 0:\r\n return\r\n\r\n # now this package has one student less wanting it\r\n package_number = self.students[student_number].wishes[wish_id]\r\n # when this...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursive function (calling itself and assign_package_if_possible). Check whether an assignment (student_number got assigned) resolved a conflict with a more important wish (wish_id).
def resolve_after_assignment(self, student_number, wish_id): # when this wish doesn't exists if wish_id < 0: return # now this package has one student less wanting it package_number = self.students[student_number].wishes[wish_id] # when this package is not ass...
[ "def assign_package_if_possible(self, package_number, wish_id):\r\n # get unassigned wishers\r\n unassigned_wishers = self.get_unassigned_wishers(package_number, wish_id)\r\n # when only a single student wants this package, they get it\r\n if len(unassigned_wishers) == 1:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Cleanly) assign all packages that are wanted by only one student (according to wish_id), checking whether each assignment resolved a conflict with a more important wish. Return all the packages that are wanted by multiple students.
def assign_packages(self, wish_id, disallowed_packages): # numbers of all packages wanted by multiple students highly_wanted_package = [] for package_number in self.packages.keys(): # don't try to assign this package if it is disallowed or already assigned if package...
[ "def assign_package_if_possible(self, package_number, wish_id):\r\n # get unassigned wishers\r\n unassigned_wishers = self.get_unassigned_wishers(package_number, wish_id)\r\n # when only a single student wants this package, they get it\r\n if len(unassigned_wishers) == 1:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go through all wishes and simply assign each package to the first student who wished for it; other students wanting the same package at the same wish level won't get it.
def assign_all_uncleanly(self): # go through all unassigned students for wish_id in range(self.amount_wishes): for student_number in self.students.keys(): if student_number in self.assigned_students.keys(): # this student is already assigned ...
[ "def assign_package_if_possible(self, package_number, wish_id):\r\n # get unassigned wishers\r\n unassigned_wishers = self.get_unassigned_wishers(package_number, wish_id)\r\n # when only a single student wants this package, they get it\r\n if len(unassigned_wishers) == 1:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go through all unassigned students and unassigned packages and just assign them with no regard to wishes. Should only be used when there are only unwanted packages left to be assigned.
def assign_all_dirtily(self): # get all unassigned packages unassigned_package_numbers = [package_number for package_number in self.packages.keys() if package_number not in self.assigned_students.values()] for wish_id in range(self.amount_wishes): ...
[ "def assign_all_uncleanly(self):\r\n # go through all unassigned students\r\n for wish_id in range(self.amount_wishes):\r\n for student_number in self.students.keys():\r\n if student_number in self.assigned_students.keys():\r\n # this student is already ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts MIDI pitch to octave number
def pitch_to_octave(pitch): return int(math.floor(pitch/12)-1)
[ "def midi_to_pitch(midi: int) -> float:\r\n return 440 * (2 ** ((midi - 69) / 12))", "def note_to_midi(note: Note) -> int:\r\n c0_code = 12\r\n name, octave = note\r\n name, shift = split_to_base_and_shift(name, name_before_accidental=True)\r\n for candidate, offset in zip(MAJOR_FROM_C, MAJOR_SCALE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert MIDI pitch to chroma value
def pitch_to_chroma(pitch): return pitch % 12
[ "def chroma(self):\n return Pitch.chroma(self)", "def midi_to_pitch(midi: int) -> float:\r\n return 440 * (2 ** ((midi - 69) / 12))", "def convert_stereo_to_mono(fragment, width):\n new_fragment = audioop.tomono(fragment, width, 0.5, 0.5)\n return new_fragment", "def encode_to_midi(self, *args...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert MIDI pitch to fundamental frequency in Hz
def pitch_to_freq(pitch): return 440.*2**((pitch-69)/12)
[ "def midi_to_freq(num):\n num_a = num - 69\n freq = 440 * 2**(num_a / 12.0)\n return freq", "def ToneToFrequency(tone):\n # 60=C4 ==> 261.63Hz\n # 69=A4 ==> 440Hz\n return 440.0 * (2.0 ** (1.0/12.0)) ** (tone - 69)", "def midi_to_pitch(midi: int) -> float:\r\n return 440 * (2 ** ((midi - 69...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract relevant fields from trade text.
def parse(self): for line in self.trade_text: print(f"Testing {line}") if self.is_forex_symbol(line): self.symbol.append(line) if self.is_price(line):
[ "def parse_tweet(tweet):\n text = tweet.get('text', '')\n details = {}\n try:\n parts = [p.strip() for p in text.split(';')]\n details['date'] = parts[0]\n details['pm25'] = parts[2]\n details['aqi'] = parts[3]\n details['tweet_id'] = tweet['id_str']\n return detai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rotate the log, and report the filename back to the client.
def rotate_log(self, request): fn = self.conveyer.rotate_logs() request.response = 200 return fn
[ "def log_rotate(filename):\n os.rename(filename, '%s-%s' % (filename, strftime('%Y-%m-%d-%H-%M-%S')))", "def rotate_log(oldname, newname):\n if os.path.isfile(oldname):\n with open(newname, 'w') as newfile:\n with open(oldname, 'r') as oldfile:\n newfile.write(oldfile.read())\n oldfile.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }