| query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
Returns the mesh rrs in native space in mm and the mesh tris for the passed-in vtk_mesh_file. nii_mesh_file needs to be the corresponding NIfTI file from BET for the same mesh as in vtk_mesh_file | def _get_vtk_mesh_native(vtk_mesh_file, nii_mesh_file):
data = pd.read_csv(vtk_mesh_file, delim_whitespace=True)
num_rrs = int(data.iloc[3, 1])
# these will be in voxel index space
rrs_flirtcoords = data.iloc[4 : num_rrs + 4, 0:3].to_numpy().astype(np.float64)
# move from flirtcoords mm to mri m... | [
"def _transform_vtk_mesh(\n vtk_mesh_file_in, nii_mesh_file_in, out_vtk_file, nii_mesh_file_out, xform_file\n):\n\n rrs_in, tris_in = _get_vtk_mesh_native(vtk_mesh_file_in, nii_mesh_file_in)\n\n xform_flirtcoords2native_out = _get_flirtcoords2native_xform(nii_mesh_file_out)\n\n if isinstance(xform_file,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns xform_flirtcoords2native transform that transforms from flirtcoords space in mm into native space in mm, where the passed in nii_mesh_file specifies the native space | def _get_flirtcoords2native_xform(nii_mesh_file):
# We will assume orientation of the smri is RADIOLOGICAL as RHINO will have made the smri the same orientation as the standard brain nii.
# But let's just double check that is the case:
smri_orient = _get_orient(nii_mesh_file)
if smri_orient != "RADIOLO... | [
"def _get_mne_xform_from_flirt_xform(flirt_xform, nii_mesh_file_in, nii_mesh_file_out):\n\n flirtcoords2native_xform_in = _get_flirtcoords2native_xform(nii_mesh_file_in)\n flirtcoords2native_xform_out = _get_flirtcoords2native_xform(nii_mesh_file_out)\n\n xform = flirtcoords2native_xform_out @ flirt_xform ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Outputs mesh to out_vtk_file, which is the result of applying the transform xform to vtk_mesh_file_in. nii_mesh_file_in needs to be the corresponding NIfTI file from BET for the same mesh as in vtk_mesh_file_in; nii_mesh_file_out needs to be the corresponding NIfTI file from BET that corresponds to the ... | def _transform_vtk_mesh(
vtk_mesh_file_in, nii_mesh_file_in, out_vtk_file, nii_mesh_file_out, xform_file
):
rrs_in, tris_in = _get_vtk_mesh_native(vtk_mesh_file_in, nii_mesh_file_in)
xform_flirtcoords2native_out = _get_flirtcoords2native_xform(nii_mesh_file_out)
if isinstance(xform_file, str):
... | [
"def mesh_file_to_vtk(input_filename, output_filename, data_format=\"ascii\",\n coord_transform=None):\n print(\"Reading {}\".format(input_filename))\n mesh = nibabel.load(input_filename)\n print()\n print(\"Summary\")\n print(\"=======\")\n mesh.print_summary()\n\n points_l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a mm coordinates to mm coordinates MNE xform that corresponds to the passed in flirt xform. Note that we need to do this as flirt xforms include an extra xform based on the voxel dimensions (see _get_flirtcoords2native_xform). | def _get_mne_xform_from_flirt_xform(flirt_xform, nii_mesh_file_in, nii_mesh_file_out):
flirtcoords2native_xform_in = _get_flirtcoords2native_xform(nii_mesh_file_in)
flirtcoords2native_xform_out = _get_flirtcoords2native_xform(nii_mesh_file_out)
xform = flirtcoords2native_xform_out @ flirt_xform @ np.linal... | [
"def _get_flirtcoords2native_xform(nii_mesh_file):\n\n # We will assume orientation of the smri is RADIOLOGICAL as RHINO will have made the smri the same orientation as the standard brain nii.\n # But let's just double check that is the case:\n smri_orient = _get_orient(nii_mesh_file)\n if smri_orient !... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the flirt xform that moves from_nii to have voxel indices on the same axes as the voxel indices of target_nii. Note that this is NOT the same as registration, i.e. the images are not aligned. In fact the actual coordinates (in mm) are unchanged. It is instead about putting from_nii onto the same axes so that the ... | def _get_flirt_xform_between_axes(from_nii, target_nii):
to2tovox = np.linalg.inv(_get_sform(target_nii)["trans"])
fromvox2from = _get_sform(from_nii)["trans"]
from2to = to2tovox @ fromvox2from
return from2to | [
"def testInverseTransformConcatenatedXfm(self):\n\n new_xyz_coords = transform_xyz_coordinates_using_xfm(outputXfmFilename3,\n 6.68, 3.14, 7.00, use_inverse=True)\n assert new_xyz_coords == approx((-119.559994975925, -2.72634880128239, 0.0509... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
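The composition in `_get_flirt_xform_between_axes` above is just two sform affines multiplied together: voxels-to-mm for `from_nii`, then mm-to-voxels for `target_nii`. A minimal numpy sketch with hypothetical diagonal sforms (not the real RHINO data) shows that the mm position is unchanged while the voxel indices are re-expressed on the target axes:

```python
import numpy as np

# Hypothetical sforms: 2 mm isotropic voxels for the source image,
# 1 mm isotropic voxels for the target image.
S_from = np.diag([2.0, 2.0, 2.0, 1.0])  # from-voxel indices -> mm
S_to = np.diag([1.0, 1.0, 1.0, 1.0])    # to-voxel indices -> mm

from2to = np.linalg.inv(S_to) @ S_from  # from-voxel indices -> to-voxel indices

vox_from = np.array([5.0, 5.0, 5.0, 1.0])
print(S_from @ vox_from)    # [10. 10. 10.  1.] -- the point's mm coordinates
print(from2to @ vox_from)   # [10. 10. 10.  1.] -- same mm point, on target voxel axes
```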
Maps the (ndipoles, tpts) array of timeseries to the grid defined by reference_mask_fname and outputs them as a NIfTI file. Assumes the timeseries' dipoles correspond to those in reference_mask_fname. Both timeseries and reference_mask_fname are often output from rhino.transform_recon_timeseries. | def _timeseries2nii(timeseries, timeseries_coords, reference_mask_fname, out_nii_fname, times=None):
if len(timeseries.shape) == 1:
timeseries = np.reshape(timeseries, [-1, 1])
mni_nii_nib = nib.load(reference_mask_fname)
coords_ind = niimask2indexpointcloud(reference_mask_fname).T
coords_mni,... | [
"def ComputeMask(fmriFiles, outputFile, infT=0.4, supT=0.9): \n compute_mask_files( fmriFiles, outputFile, False, infT, supT, cc=1)",
"def nc_to_nps_int(inFile, outFile, date, xfcst, fields, source=None, \n geos2wrf=False, log=None, createIndividualFiles=False,\n expectedUnits... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save or show a renderer. | def save_or_show_renderer(renderer, filename):
if filename is None:
renderer.show()
else:
allowed_extensions = [".html", ".pdf", ".svg", ".eps", ".ps", ".tex"]
ext = Path(filename).suffix
if ext not in allowed_extensions:
raise ValueError(f"{ext} not allowed, please u... | [
"def renderer():",
"def set_render_option(self, render_option):\n self._renderer = pipeline_graph_renderer.get_renderer(render_option)",
"def renderer(string, changeIprRegionProcedure=\"string\", showBatchRenderLogProcedure=\"string\", iprOptionsProcedure=\"string\", globalsTabCreateProcNames=bool, unregiste... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates surface mesh in .surf format and in native mri space in mm from infile. | def _create_freesurfer_mesh_from_bet_surface(infile, surf_outfile, xform_mri_voxel2mri, nii_mesh_file=None):
pth, name = op.split(infile)
name, ext = op.splitext(name)
if ext == ".gz":
print("Creating surface mesh for {} .....".format(infile))
# Soft import raising an informative warning ... | [
"def unstructured_mesh(fname, sizing, convert):\n geo_tools.prep_mesh_config(\n fname + \"Morphology.geo\", fname + \"UMesh.geo\", sizing)\n mesh_domain(fname + \"UMesh.geo\")\n if convert:\n convert_mesh(fname + \"UMesh.msh\", fname + \"UMesh.xml\")",
"def parseMsmsSurface(self, pdb_fn, ve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract RHINO files. This function extracts surfaces and coregistration files calculated in a previous run. | def extract_rhino_files(old_subjects_dir, new_subjects_dir, subjects="all", exclude=None, gen_report=True):
# Avoid circular imports
from osl.source_recon.rhino import plot_surfaces, coreg_display
from osl.report import src_report
# Validation
if exclude is None:
exclude = []
if isinst... | [
"def get_rhino_files(subjects_dir, subject):\n\n # Base RHINO directory\n rhino_dir = op.join(subjects_dir, subject, \"rhino\")\n if \" \" in rhino_dir:\n raise ValueError(\"subjects_dir/src_dir cannot contain spaces.\")\n\n # Surfaces files\n surfaces_dir = op.join(rhino_dir, \"surfaces\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the ebay_collect_and_remit_tax of this Taxes. | def ebay_collect_and_remit_tax(self, ebay_collect_and_remit_tax):
self._ebay_collect_and_remit_tax = ebay_collect_and_remit_tax | [
"def sales_tax(self, sales_tax):\n\n self._sales_tax = sales_tax",
"def inclusive_tax(self, inclusive_tax):\n\n self._inclusive_tax = inclusive_tax",
"def rental_tax_withheld_list(self, rental_tax_withheld_list):\n\n self._rental_tax_withheld_list = rental_tax_withheld_list",
"def save(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the included_in_price of this Taxes. | def included_in_price(self, included_in_price):
self._included_in_price = included_in_price | [
"def included_in_chart(self, included_in_chart):\n\n self._included_in_chart = included_in_chart",
"def set_buy_price(self, buy_price: float) -> None:\n self.buy_price = buy_price",
"def inclusive_tax_money(self, inclusive_tax_money):\n\n self._inclusive_tax_money = inclusive_tax_money",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the shipping_and_handling_taxed of this Taxes. | def shipping_and_handling_taxed(self, shipping_and_handling_taxed):
self._shipping_and_handling_taxed = shipping_and_handling_taxed | [
"def shipping_handling(self, shipping_handling):\n\n self._shipping_handling = shipping_handling",
"def shipping_handling_with_discount(self, shipping_handling_with_discount):\n\n self._shipping_handling_with_discount = shipping_handling_with_discount",
"def sales_tax(self, sales_tax):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the tax_jurisdiction of this Taxes. | def tax_jurisdiction(self, tax_jurisdiction):
self._tax_jurisdiction = tax_jurisdiction | [
"def tax_id(self, value: str):\n self._tax_id = value\n self._dao.tax_id = value",
"def tax_money(self, tax_money):\n\n self._tax_money = tax_money",
"def jurisdictions(self, jurisdictions):\n if jurisdictions is None:\n raise ValueError(\"Invalid value for `jurisdictions`... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the tax_percentage of this Taxes. | def tax_percentage(self, tax_percentage):
self._tax_percentage = tax_percentage | [
"def setPercentage(self, percentage):\n\t\tself.percentage = percentage\n\t\tif self.expressionStateNode:\n\t\t\tself.expressionStateNode.slider.widget().setValue(percentage)",
"def set_default_tax(self):\n self.gui.spn_tax.setValue(\n Decimal(self.ctl.get_parameter_value(13))\n )",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the tax_type of this Taxes. | def tax_type(self, tax_type):
self._tax_type = tax_type | [
"def federal_tax_id_type(self, federal_tax_id_type):\n\n self._federal_tax_id_type = federal_tax_id_type",
"def set_type(self, ttype):\n self.type = ttype\n self.token.type = ttype",
"def set_type(self, atype):\n _ldns.ldns_rdf_set_type(self, atype)\n #parameters: ldns_rdf *, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply criterion-specific modifications to the given sample/hypotheses. | def prepare_sample_and_hypotheses(self, model, sample, hypos):
# compute BLEU for each hypothesis
hypos = self.add_bleu_to_hypotheses(sample, hypos)
norm_bleu = torch.FloatTensor([
[ self.scale * h['bleu'] / 100 for h in hypos_i]
for hypos_i in hypos
])
sa... | [
"def prepare_sample_and_hypotheses(self, model, sample, hypos):\n # compute token-level loss (unnormalized)\n sample['token_criterion_out'] = self.token_criterion(model, sample)\n\n # then prepare sample for sequence-level criterion\n return self.sequence_criterion.prepare_sample_and_hyp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes a request to the API with an anime entry. Returns (anime, themes) | def request_anime(animentry: Tuple[int,str]) -> Tuple[Tuple[int,str],Optional[AnimeThemeAnime]]:
malid,title = animentry
anime = make_anime_request(title)
if not isinstance(anime,AnimeThemesTimeout):
anime = pick_best_anime(malid,title,anime)
return animentry,anime | [
"async def anime(message):\n query = message.content.strip()\n if not len(query):\n raise CommandError(\"Supply the name of an anime to search.\")\n auth = aiohttp.BasicAuth(username(), password())\n try:\n r = await http.get(\"https://myanimelist.net/api/anime/search.xml\", params=[\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in an animelist and returns a tuple of wanted animelist and a list of animethemes. | def pick_needed(animelist: List[Tuple[int,str]]) -> Tuple[List[Tuple[int,str]],List[AnimeThemeAnime]]:
logger.debug(f'Loading animethemes data from {TEMPFILE}')
animethemes = []
animelist = {i[0]:i[1] for i in animelist}
with open(TEMPFILE,'r') as file:
for anime in json.load(file):
... | [
"def fetch_animethemes(animelist: List[Tuple[int,str]]) -> List[AnimeThemeAnime]:\n progressbar = \"[^] %s/%s\" if logger.level<=logging.INFO else \"\"\n tempfile_exists = isfile(TEMPFILE) and time.time()-getmtime(TEMPFILE) <= OPTIONS['download']['max_animethemes_age']\n if tempfile_exists:\n animel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the anime entries from animethemes or a data file younger than a day. Can show progress with `show_progress` string. Formats with % current,total. | def fetch_animethemes(animelist: List[Tuple[int,str]]) -> List[AnimeThemeAnime]:
progressbar = "[^] %s/%s" if logger.level<=logging.INFO else ""
tempfile_exists = isfile(TEMPFILE) and time.time()-getmtime(TEMPFILE) <= OPTIONS['download']['max_animethemes_age']
if tempfile_exists:
animelist,animethem... | [
"def fetch_animelist(self, user):\n entries = []\n\n mal_user = user.mal_user\n if not mal_user:\n return entries\n\n username = mal_user.username\n url = 'https://myanimelist.net/animelist/{}/load.json'.format(username)\n r = requests.get(url)\n json_entr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start threads for the robot and the drawing function and return the drawn board coordinates | def calc_board(graphic_mode=False,start=None):
instructions = []
inqueue = queue.Queue()
outqueue = queue.Queue()
board_grid = {} # dict of painted coordinates, by default all are black -> 0 key is coords: (x,y), value is color (0,0):0
for x in range(40):
for y in range(40):
bo... | [
"def run(self):\n worker_args = (self.process_func, self.draw_func, self.start_point, self.end_point)\n work_thread = threading.Thread(target=self.worker, args=worker_args)\n\n work_thread.daemon = True\n work_thread.start()\n\n # rgb_list in refresh_image shouldnt be empty\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is supposed to be used as a decorator. It must decorate another function that is intended to be used as a decorator. Take a cup of coffee. It will allow any decorator to accept an arbitrary number of arguments, saving you the headache of remembering how to do that every time. | def decorator_with_args(decorator_to_enhance):
# We use the same trick we did to pass arguments
def decorator_maker(*args, **kwargs):
# We create on the fly a decorator that accepts only a function
# but keeps the passed arguments from the maker.
def decorator_wrapper(func):
... | [
"def maybe_args(decorator):\n def wrapped_decorator(klass, *args):\n if len(args) == 1 and callable(args[0]):\n return decorator(klass, *args)\n else:\n def real_decorator(method):\n return decorator(klass, method, *args)\n return real_decorator\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
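The `decorator_with_args` body is truncated in the row above; below is a self-contained sketch of the same classic closure trick, assuming (as the visible code suggests) that the enhanced decorator receives the function as its first argument, followed by the forwarded arguments:

```python
import functools

def decorator_with_args(decorator_to_enhance):
    # First swallow the decorator arguments...
    def decorator_maker(*args, **kwargs):
        # ...then build, on the fly, a plain one-argument decorator
        # that still sees those arguments via the closure.
        def decorator_wrapper(func):
            return decorator_to_enhance(func, *args, **kwargs)
        return decorator_wrapper
    return decorator_maker

@decorator_with_args
def tag_output(func, prefix):   # hypothetical decorator used for the demo
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return f"{prefix}: {func(*args, **kwargs)}"
    return wrapper

@tag_output("result")           # arguments now work on the enhanced decorator
def add(a, b):
    return a + b

print(add(1, 2))                # "result: 3"
```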
Fails if list of web elements does not contain text | def webelements_should_contain_text(elements, text):
for elem in elements:
if elem.text.lower() == text.lower():
return
raise AssertionError("Webelements don't contain text '%s'" % text) | [
"def webelements_should_not_contain_text(elements, text):\n for elem in elements:\n if elem.text.lower() == text.lower():\n raise AssertionError(\"Webelements contain text '%s'\" % text)",
"def test_contains_multiple_not_no_match(self):\n\n self.assert_selector(\n self.MARKU... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails if the list of web elements contains the text | def webelements_should_not_contain_text(elements, text):
for elem in elements:
if elem.text.lower() == text.lower():
raise AssertionError("Webelements contain text '%s'" % text) | [
"def webelements_should_contain_text(elements, text):\n for elem in elements:\n if elem.text.lower() == text.lower():\n return\n raise AssertionError(\"Webelements don't contain text '%s'\" % text)",
"def test_contains_multiple_not_no_match(self):\n\n self.assert_selector(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get area weighted global mean values for a CMIP6 run for year slices. The actual calculation is all done by xarray. Most of this code is a baspy wrapper. A match is looked for in the baspy catalogue, then the global surface mean temperature is calculated from atmospheric monthly data. Only tested with CMIP6. | def get_global_mean(
Model: str,
Experiment: str,
RunID: str = None,
year_ranges: List[slice] = [slice(1850, 1950), slice(2000, 2010)],
var: str = "tas",
) -> List[float]:
row = df[
(df["Model"] == Model)
& (df["Experiment"] == Experiment)
& (df["Var"] == var)
]
i... | [
"def global_average(fld, gw):\n if \"time\" in fld.dims:\n return wgt_areaave_xr(fld, gw).mean(dim=\"time\")\n else:\n return wgt_areaave_xr(fld, gw)",
"def avg_area(clouds):\n if not clouds:\n return np.nan\n return xr.DataArray([c.area for c in clouds]).mean()",
"def National_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save changes to a card to the gateway. | def save(self):
gateway = get_gateway(self.gateway_name)
gateway.update_card(self) | [
"def put_card(self, card):\r\n self.deck.append(card)",
"def test_pay_ins_universal_pay_universal_pay_save_card(self):\n pass",
"def set_card(self, card: Card):\n self.russian_modified.block(True)\n self.english_modified.block(True)\n self.card = card\n self.__is_new_ca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a card from the gateway. | def delete(self):
gateway = get_gateway(self.gateway_name)
gateway.delete_card(self)
return True | [
"def delete_card(self, token, card_id):\n CustomerCard.delete_card(card_id=card_id)\n return {}",
"def delete_card(self, cardID: str):\n\t\t# print(type(cardID))\n\t\tquery_str = [\n\t\t\t\"DELETE FROM cards\",\n\t\t\t\"WHERE card_id = '{0}';\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if players are connected to server | def CHECK_PLAYERS():
pattern = re.compile(".*[Nn]o.[Pp]layers.[Cc]onnected.*")
PLAYER_LIST = RCON_CLIENT('listplayers')
if pattern.search(PLAYER_LIST):
return False
else:
return PLAYER_LIST | [
"def all_players_connected(self):\n num_clients = len(self.server.CLIENTS)\n return num_clients == 4",
"def is_online(self):\n value = b'test'\n try:\n return self.probe_server(value=value) == value\n except ConnectionError:\n return False",
"def is_conne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If no players are connected, shut down and then restart the server | def RESTART_SERVER():
#try:
#MAP = MAP_NAME
#except:
#pass
if not CHECK_PLAYERS():
pass
else:
RCON_CLIENT("broadcast Server going down for maintenance in 3 minutes")
PLAYER_MONITOR()
RECENT_SAVE = sshconnect.sendCommand('if [[ $(( $(/bin/date +%... | [
"def welcome_run(self):\n self.broadcast_message({'ready': True})\n print \"Waiting for all players to be ready...\"\n while not all(self.players_ready.values()):\n time.sleep(1)\n print \"...\"\n print \"Ready to play! Starting game in 3 seconds...\"\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clean up extra mod content that is not part of the active mods in GameUserSettings.ini | def MOD_CLEANUP():
DIR_CHK = sshconnect.sendCommand("if [[ -d {}/ShooterGame/Content/Mods ]]; then echo 'exists' ; fi".format(SERV_ARK_INSTALLDIR), parse=True, target="exists")
if DIR_CHK:
## if existing file has match under ActiveMods then remove it from array
sshconnect.sendCommand("/bin/sed -... | [
"def clean_mods(self, modifiers):\n if \"\" in modifiers and isinstance(modifiers, list):\n modifiers.remove(\"\")",
"def shield_from_user_config(request):\n _pop_out_yaml_from_config(orion.core.config)",
"def _clear_replace_info(self):\n for infos in self.db[self.tag].callids[self.c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test to make sure `getNewTimes` returns ten elements when 75 elements are entered over a ten day span with one day binning in place. | def test_getNewTimes_one_day_bin_ten_days():
times = np.random.uniform(0, 10, 75)
newtimes = wm.getNewTimes(times, 1.)
print(len(newtimes))
assert len(newtimes) == 10 | [
"def test_big_gaps_getNewVals():\n timebin = 1.\n times = np.concatenate((np.random.uniform(0, 10, 50),\n np.random.uniform(30, 40, 50)))\n newtimes = wm.getNewTimes(times, timebin)\n rvs = np.random.normal(loc=0, scale=5, size=100)\n uncs = np.random.normal(loc=1., scale=0.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test to make sure times are shifted properly if the phase optional argument is used with a two day bin. | def test_getNewTimes_with_half_phase_two_day_bin():
times = np.random.uniform(0, 10, 75)
newtimes = wm.getNewTimes(times, 2.)
newtimes2 = wm.getNewTimes(times, 2., phase=0.5)
assert np.round((np.min(newtimes2) - np.min(newtimes)), 7) == 1.000 | [
"def phase(dp):\n from tayph.vartests import typetest\n import numpy as np\n from astropy.io import ascii\n from astropy.time import Time\n from astropy import units as u, coordinates as coord\n import tayph.util as ut\n dp=check_dp(dp)#Path object\n d=ascii.read(dp/'obs_times',comment=\"#\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The output dimension of newRVs should be the same as the input dimension of newtimes | def test_getNewVals_for_newrvs_dim():
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.uniform(-5, 5, 100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert len(newtimes) == len(newRVs... | [
"def test_getNewVals_for_newuncs_dim():\n newtimes = np.arange(10)\n times = np.random.uniform(0, 10, 100)\n rvs = np.random.uniform(-5, 5, 100)\n uncs = np.random.normal(loc=1., scale=0.5, size=100)\n newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)\n assert len(newtimes) =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The output dimension of newUncs should be the same as the input dimension of newtimes | def test_getNewVals_for_newuncs_dim():
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.uniform(-5, 5, 100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert len(newtimes) == len(newUn... | [
"def test_getNewVals_for_newrvs_dim():\n newtimes = np.arange(10)\n times = np.random.uniform(0, 10, 100)\n rvs = np.random.uniform(-5, 5, 100)\n uncs = np.random.normal(loc=1., scale=0.5, size=100)\n newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)\n assert len(newtimes) ==... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The RV scatter (standard deviation of normally distributed points about the mean) should be reduced when binning observations down. This routine checks that. | def test_getNewVals_rv_scatter():
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.normal(loc=0, scale=5, size=100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert np.std(newRVs) < n... | [
"def __scatter(self, x, y, attributes):\n scatter_index = []\n for i in range(x.shape[1]):\n if not isinstance(x[0, i], str): # find the index whose value is continuous.\n scatter_index.append(i)\n for ind in scatter_index:\n split_threshold = 0.0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure getNewVals routine can handle big gaps in times | def test_big_gaps_getNewVals():
timebin = 1.
times = np.concatenate((np.random.uniform(0, 10, 50),
np.random.uniform(30, 40, 50)))
newtimes = wm.getNewTimes(times, timebin)
rvs = np.random.normal(loc=0, scale=5, size=100)
uncs = np.random.normal(loc=1., scale=0.5, size=100... | [
"def create_and_fill(old_data, entries):\n final_array = np.arange(entries * 5, dtype=int)\n final_array.shape = (entries,5)\n final_array = get_date(final_array, old_data, entries)\t# contains no header/footer line\n final_array = get_time(final_array, old_data, entries)\n final_array = get_temp_dew... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the presence of some expected code in the html viewer. | def _check_html(html_view):
assert "Parameters" in str(html_view)
assert "data:image/svg+xml;base64," in str(html_view)
assert html_view._repr_html_() == html_view.body | [
"def test_is_html_tag_properly(self):\r\n file=\"HTMLDOC.txt\"\r\n html_doc=p.read_file(file)\r\n result=p.is_html_tag_properly(html_doc)\r\n self.assertTrue(result,True)",
"def and_has_html(self, html: str):\n pass",
"def has_html_preview(self) -> bool:\n return False"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests report generation after fitting on 3D data. | def test_reports_after_fit_3d_data(masker_class,
input_parameters,
data_img_3d):
masker = masker_class(**input_parameters)
masker.fit(data_img_3d)
html = masker.generate_report()
_check_html(html) | [
"def test_reports_after_fit_3d_data_with_mask(masker_class,\n input_parameters,\n data_img_3d,\n mask):\n input_parameters[\"mask_img\"] = mask\n masker = masker_class(**input_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests report generation after fitting on 3D data with mask_img. | def test_reports_after_fit_3d_data_with_mask(masker_class,
input_parameters,
data_img_3d,
mask):
input_parameters["mask_img"] = mask
masker = masker_class(**input_parameters)
... | [
"def test_reports_after_fit_3d_data(masker_class,\n input_parameters,\n data_img_3d):\n masker = masker_class(**input_parameters)\n masker.fit(data_img_3d)\n html = masker.generate_report()\n _check_html(html)",
"def test_nifti_maps_m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a warning is both given and written in the report if no images were provided to fit. | def test_warning_in_report_after_empty_fit(masker_class,
input_parameters):
masker = masker_class(**input_parameters)
assert masker._report_content['warning_message'] is None
masker.fit()
warn_message = f"No image provided to fit in {masker_class.__name__}."
... | [
"def testWarnings(self):\n radialTransform = afwGeom.RadialXYTransform([0, 2.0, 3.0])\n wcs = afwImage.DistortedTanWcs(self.tanWcs, radialTransform)\n self.assertRaises(UserWarning, approximateWcs, wcs=wcs, bbox=self.bbox, order=2)",
"def test_warn_severity(check_plugin):\n error = check_plu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a TypeError is raised when the argument `displayed_maps` of `generate_report()` is not valid. | def test_nifti_maps_masker_report_displayed_maps_errors(
niftimapsmasker_inputs, displayed_maps):
masker = NiftiMapsMasker(**niftimapsmasker_inputs)
masker.fit()
with pytest.raises(TypeError,
match=("Parameter ``displayed_maps``")):
masker.generate_report(displayed_map... | [
"def test_nifti_maps_masker_report_maps_number_errors(\n niftimapsmasker_inputs, displayed_maps):\n masker = NiftiMapsMasker(**niftimapsmasker_inputs)\n masker.fit()\n with pytest.raises(ValueError,\n match=\"Report cannot display the following maps\"):\n masker.generate... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a ValueError is raised when the argument `displayed_maps` contains invalid map numbers. | def test_nifti_maps_masker_report_maps_number_errors(
niftimapsmasker_inputs, displayed_maps):
masker = NiftiMapsMasker(**niftimapsmasker_inputs)
masker.fit()
with pytest.raises(ValueError,
match="Report cannot display the following maps"):
masker.generate_report(displ... | [
"def test_nifti_maps_masker_report_displayed_maps_errors(\n niftimapsmasker_inputs, displayed_maps):\n masker = NiftiMapsMasker(**niftimapsmasker_inputs)\n masker.fit()\n with pytest.raises(TypeError,\n match=(\"Parameter ``displayed_maps``\")):\n masker.generate_report(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests report generation for NiftiMapsMasker with displayed_maps passed as a list or a NumPy array. | def test_nifti_maps_masker_report_list_and_arrays_maps_number(
niftimapsmasker_inputs, displayed_maps):
masker = NiftiMapsMasker(**niftimapsmasker_inputs)
masker.fit()
html = masker.generate_report(displayed_maps)
assert masker._report_content['report_id'] == 0
assert masker._report_content[... | [
"def test_nifti_maps_masker_report_integer_and_all_displayed_maps(\n niftimapsmasker_inputs, displayed_maps):\n masker = NiftiMapsMasker(**niftimapsmasker_inputs)\n masker.fit()\n expected_n_maps = 9 if displayed_maps == 'all' else min(9, displayed_maps)\n if displayed_maps != 'all' and displayed... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests NiftiMapsMasker reporting with no image provided to fit and displayed_maps provided as an integer or as 'all'. | def test_nifti_maps_masker_report_integer_and_all_displayed_maps(
niftimapsmasker_inputs, displayed_maps):
masker = NiftiMapsMasker(**niftimapsmasker_inputs)
masker.fit()
expected_n_maps = 9 if displayed_maps == 'all' else min(9, displayed_maps)
if displayed_maps != 'all' and displayed_maps > 9:... | [
"def test_nifti_maps_masker_report_displayed_maps_errors(\n niftimapsmasker_inputs, displayed_maps):\n masker = NiftiMapsMasker(**niftimapsmasker_inputs)\n masker.fit()\n with pytest.raises(TypeError,\n match=(\"Parameter ``displayed_maps``\")):\n masker.generate_report(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests NiftiMapsMasker reporting with image provided to fit. | def test_nifti_maps_masker_report_image_in_fit(niftimapsmasker_inputs):
masker = NiftiMapsMasker(**niftimapsmasker_inputs)
image, _ = generate_random_img((13, 11, 12), affine=np.eye(4), length=3)
masker.fit(image)
html = masker.generate_report(2)
assert masker._report_content['report_id'] == 0
a... | [
"def test_nifti_maps_masker_report_integer_and_all_displayed_maps(\n niftimapsmasker_inputs, displayed_maps):\n masker = NiftiMapsMasker(**niftimapsmasker_inputs)\n masker.fit()\n expected_n_maps = 9 if displayed_maps == 'all' else min(9, displayed_maps)\n if displayed_maps != 'all' and displayed... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears the overridden Geolocation Position and Error. | def clear_geolocation_override(self):
raise NotImplementedError() | [
"def reset(self):\n self.logger.debug(\"Reset location\")\n self.start_location = self.location\n self.distance = 0",
"def clear_position(self):\n\n # Remove each item from the drop down menu and positions dictionary.\n for key in self.gui.savedPos.keys():\n index = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overrides the Geolocation Position or Error. | def set_geolocation_override(self):
raise NotImplementedError() | [
"def clear_geolocation_override(self):\n raise NotImplementedError()",
"def error_noloc(message):\n location = noloc()\n error(message, location)",
"def _geocoder_exception_handler(self, error):\n pass",
"def set_gps(self, x, y):\n\t\tpass",
"def set_location(self, x, y):\r\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create container from image. Pulls defaults from image.inspect() | def _create(self, *args, **kwargs):
details = self.inspect()
config = ConfigDict(image_id=self._id, **kwargs)
config["command"] = details.config.get("cmd")
config["env"] = self._split_token(details.config.get("env"))
config["image"] = copy.deepcopy(details.repotags[0]) # Falls to https://github.co... | [
"def create_container(self):\n identity = None\n print(\"Creating docker image: {}.. be patient this can take a while!\".format(self.tag))\n try:\n logging.info(\"build(path=%s, tag=%s, rm=True, decode=True)\", self.dest, self.tag)\n api_client = self.get_api_client()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Do one atomic commit. This is in fact not atomic, since the memcached wrapper needs more work, but it is the best we can do. | def commit(self):
txn = transaction.get()
if hasattr(txn, 'v_delete_cache'):
if self.delete_multi(to_delete=txn.v_delete_cache, immediate=True) != 1:
LOG.error("_invalidate_cache delete_multi failed")
txn.v_delete_cache = []
if hasattr(txn, 'v_cache'):
... | [
"def commit(self):\n result = self.lastTransaction.commit()\n self.lastTransaction = None\n return result",
"def c_commit(self, args):\n log.info('forcing commit')\n self.db.commit()",
"def _do_commit(self):\n self.backend.commit()",
"def commit(self):\n\t\tdel self.t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Invalidate cached results affected by rid and / or index_name | def _invalidate_cache(self, rid=None, index_name='', immediate=False):
global _memcache_failure_timestamp
if not self._memcache_available():
return
cache_id = '/'.join(self.getPhysicalPath())
LOG.debug('[%s] _invalidate_cache rid=%s, index_name=%s' % (cache_id, rid, index_name))
to_delete... | [
"def invalidate_caches():",
"def clear_old_indexes():\n for f in os.listdir(CLUSTER_RESULT_DIR):\n if f.endswith('.idx'):\n os.remove(os.path.join(CLUSTER_RESULT_DIR, f))",
"def clear_caches(self):",
"def _invalidate_caches(self):\n pass",
"async def clear_cache(\n self,\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds an object to the Catalog by iteratively applying it to all indexes. 'object' is the object to be cataloged. 'uid' is the unique Catalog identifier for this object. If 'idxs' is specified (as a sequence), apply the object only to the named indexes. If 'update_metadata' is true (the default), also update metadata for ... | def catalogObject(self, object, uid, threshold=None, idxs=None,
update_metadata=1):
if idxs is None:
idxs = []
data = self.data
index = self.uids.get(uid, None)
if index is not None:
self._invalidate_cache(rid=index)
if index is None: # we are inserting ... | [
"def reindexObject(self, object, idxs=[], update_metadata=1, uid=None):\n if uid is None:\n uid = self.__url(object)\n if idxs != []:\n # Filter out invalid indexes.\n valid_indexes = self._catalog.indexes.keys()\n idxs = [i for i in idxs if i in valid_index... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uncatalog an object from the Catalog. 'uid' is a unique Catalog identifier. Note that the uid must be the same as when the object was catalogued, otherwise it will not get removed from the catalog. This method should not raise an exception if the uid cannot be found in the catalog. | def uncatalogObject(self, uid):
data = self.data
uids = self.uids
paths = self.paths
indexes = self.indexes.keys()
rid = uids.get(uid, None)
if rid is not None:
self._invalidate_cache(rid=rid)
for name in indexes:
x = self.getIndex(name)
if hasattr(x, 'u... | [
"def remove(uid):",
"def unindex_object( self, document_id ):\n attribute_ids = self._catalog.getIndex('record')._index.get( document_id )\n if attribute_ids is None:\n return\n\n if type( attribute_ids ) is IntType:\n attribute_ids = [ attribute_ids ]\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterate through the indexes, applying the query to each one. If merge is true then return a lazy result set (sorted if appropriate); otherwise return the raw (possibly scored) results for later merging. Limit is used in conjunction with sorting or scored results to inform the catalog how many results you are really inter... | def search(self, request, sort_index=None, reverse=0, limit=None, merge=1):
rs = None # resultset
# Indexes fulfill a fairly large contract here. We hand each
# index the request mapping we are given (which may be composed
# of some combination of web request, kw mappings or plain old dicts)
# and... | [
"def multi_execute(self, version=1):\n returnable = []\n for limits in self.query['search_limits']:\n child = deepcopy(self.query)\n child['search_limits'] = limits\n q = self.__class__(child).return_json(raw_python_object=True,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Path to the given pointcloud. | def get_pointcloud_path(self, item_name: str) -> str:
return super().get_item_path(item_name) | [
"def _fix_point_cloud_path(self, path: str) -> str:\n unwanted_prefix = (\n \"/large_experiments/p3/replay/datasets/co3d/co3d45k_220512/export_v23/\"\n )\n if path.startswith(unwanted_prefix):\n path = path[len(unwanted_prefix) :]\n assert self.dataset_root is not N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Information for Pointcloud with given name. | def get_pointcloud_info(self, item_name: str) -> PointcloudInfo:
return self.get_item_info(item_name) | [
"def visualize_pointcloud_new(pointcloud, name, save_path):\n # Open 3D can only store pointcloud as .ply\n save_file_ply = os.path.join(save_path, \"{}.ply\".format(name))\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(pointcloud)\n o3d.io.write_point_cloud(save_file_ply... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds given item file to dataset items directory, and adds given annotation to dataset annotations directory. If ann is None, creates an empty annotation file. | def add_item_file(
self,
item_name: str,
item_path: str,
ann: Optional[Union[PointcloudAnnotation, str]] = None,
_validate_item: Optional[bool] = True,
_use_hardlink: Optional[bool] = False,
item_info: Optional[Union[PointcloudInfo, Dict, str]] = None,
) -> No... | [
"def add_item_np(\n self,\n item_name: str,\n pointcloud: np.ndarray,\n ann: Optional[Union[PointcloudAnnotation, str]] = None,\n item_info: Optional[NamedTuple] = None,\n ) -> None:\n # TODO: is it ok that names of params differs from base function?\n # TODO: che... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds given numpy array as a pointcloud to dataset items directory, and adds given annotation to dataset ann directory. If ann is None, creates an empty annotation file. | def add_item_np(
self,
item_name: str,
pointcloud: np.ndarray,
ann: Optional[Union[PointcloudAnnotation, str]] = None,
item_info: Optional[NamedTuple] = None,
) -> None:
# TODO: is it ok that names of params differs from base function?
# TODO: check this funct... | [
"def _write_annotation(filename, annotation):\n _mkdir(os.path.dirname(filename))\n save_pbobject_as_json(annotation, filename)",
"def add_annotations(self, annotations):\n\n if not isinstance(annotations, list):\n print('Image.add_annotations expects a list, received {}'.format(type(annot... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read pointcloud annotation of item from json. | def get_ann(
self, item_name, project_meta: ProjectMeta, key_id_map: Optional[KeyIdMap] = None
) -> PointcloudAnnotation:
ann_path = self.get_ann_path(item_name)
return PointcloudAnnotation.load_json_file(ann_path, project_meta, key_id_map) | [
"def load_json_file(\n cls, path: str, project_meta: ProjectMeta, key_id_map: Optional[KeyIdMap] = None\n ) -> PointcloudAnnotation:\n\n with open(path) as fin:\n data = json.load(fin)\n return cls.from_json(data, project_meta, key_id_map)",
"def read_annotations(path):\n ali... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get train and val items information from project by given train and val counts. | def get_train_val_splits_by_count(
project_dir: str, train_count: int, val_count: int
) -> Tuple[List[PointcloudItemInfo], List[PointcloudItemInfo]]:
def _list_items_for_splits(project) -> List[PointcloudItemInfo]:
items = []
for dataset in project.datasets:
... | [
"def get_train_val_splits_by_dataset(\n project_dir: str, train_datasets: List[str], val_datasets: List[str]\n ) -> Tuple[List[PointcloudItemInfo], List[PointcloudItemInfo]]:\n\n def _add_items_to_list(project, datasets_names, items_list):\n for dataset_name in datasets_names:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get train and val items information from project by given train and val tags names. | def get_train_val_splits_by_tag(
project_dir: str,
train_tag_name: str,
val_tag_name: str,
untagged: Optional[str] = "ignore",
) -> Tuple[List[PointcloudItemInfo], List[PointcloudItemInfo]]:
untagged_actions = ["ignore", "train", "val"]
if untagged not in untagged_act... | [
"def get_train_val_splits_by_dataset(\n project_dir: str, train_datasets: List[str], val_datasets: List[str]\n ) -> Tuple[List[PointcloudItemInfo], List[PointcloudItemInfo]]:\n\n def _add_items_to_list(project, datasets_names, items_list):\n for dataset_name in datasets_names:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get train and val items information from project by given train and val datasets names. | def get_train_val_splits_by_dataset(
project_dir: str, train_datasets: List[str], val_datasets: List[str]
) -> Tuple[List[PointcloudItemInfo], List[PointcloudItemInfo]]:
def _add_items_to_list(project, datasets_names, items_list):
for dataset_name in datasets_names:
data... | [
"def get_train_val_splits_by_count(\n project_dir: str, train_count: int, val_count: int\n ) -> Tuple[List[PointcloudItemInfo], List[PointcloudItemInfo]]:\n\n def _list_items_for_splits(project) -> List[PointcloudItemInfo]:\n items = []\n for dataset in project.datasets:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uploads pointcloud project to Supervisely from the given directory. | def upload(
directory: str,
api: Api,
workspace_id: int,
project_name: Optional[str] = None,
log_progress: Optional[bool] = False,
progress_cb: Optional[Union[tqdm, Callable]] = None,
) -> Tuple[int, str]:
return upload_pointcloud_project(
director... | [
"def upload(self, relative_path, base_dir):\n raise NotImplementedError",
"def _UploadFiles(upload_dir, files):\n if files:\n google_storage_upload_dir = os.path.join(_RENDER_TEST_BUCKET, upload_dir)\n cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'),\n '-m', 'cp']\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download pointcloud project to the local directory. | def download_pointcloud_project(
api: Api,
project_id: int,
dest_dir: str,
dataset_ids: Optional[List[int]] = None,
download_items: Optional[bool] = True,
download_related_images: Optional[bool] = True,
download_pointclouds_info: Optional[bool] = False,
batch_size: Optional[int] = 10,
... | [
"def download_project_files():\n log_path = os.getcwd() + \"/log/\"\n output = subprocess.getstatusoutput(\"cd AIstudio_Download && python ./aistudio_client.py\")\n with open(log_path + \"download.log\", \"a\") as flog:\n flog.write(\"%s\" % (output[1]))",
"def download(self,filename):\n\t\tdirect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the data to match recent user changes. Only covers selecting a new Jconf; the functions for adding/deleting one directly change the data. | def UpdateData(self, event = None):
#currentSelection = self.confList.GetStringSelection()
#self.state.Edit("JconfSelection", currentSelection)
self.React()
self.UpdateDisplay()
return | [
"def update_data(current_user_data, filename):\n\n try:\n stored_data = read_data(filename)\n logger.info(\"Merging new data into stored\")\n\n for k, v in current_user_data.items():\n if existing := stored_data.get(k):\n stored_data[k] = list(set(existing) | set(v)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
User chooses a new Jconf file to add to the list. | def AddNew(self, event):
##Default directory for the search is the
##DepsDir/JUGGLER_FOLDER/configFiles.
## f = self.state.GetSurface("DependenciesDir")
## if f != None:
## f = os.path.join(f, JUGGLER_FOLDER, "configFiles")
## else:
## f = VELAUNCHER_DIR
... | [
"def add_jasmin_file(self, jfile):\n self._jasmin_files.append(jfile)",
"def Rename(self, event):\n name = self.confList.GetStringSelection()\n while True:\n n = self.confList.GetStringSelection()\n p = self.state.GetSurface(\"JconfDict\").GetPath(n)\n f = os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renames the selected Jconf entry. | def Rename(self, event):
name = self.confList.GetStringSelection()
while True:
n = self.confList.GetStringSelection()
p = self.state.GetSurface("JconfDict").GetPath(n)
f = os.path.split(p)[1]
dlg = wx.TextEntryDialog(self,
... | [
"def rename(self, args):\n\t\tif len(args) not in range(1, 2 + 1):\n\t\t\terror('wrong number of args.')\n\n\t\tsilent = are_in('-s', '--silent', args, type=\"any\")\n\n\t\tconfig = self.__get_config()\n\t\tif config.get(args[0], False) is False:\n\t\t\terror('The key \"{}\" doesn\\'t exists.'.format(args[0]))\n\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Warns user if oldName was changed to newName. | def NameChangeWarning(self, oldName, newName):
dlg = wx.MessageDialog(None,
"The name %s already existed" %(oldName) + \
" in the list.\n" + \
"Your entry was given the" + \
" name %s inst... | [
"def nameChanged(self, oldName, newName):",
"def change_name(self):\n if self.user_can_update_information():\n old_firstname = self.user.firstname\n old_surname = self.user.surname\n self.user.firstname = input(\"What is your firstname?\\n\")\n self.user.surname ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that we can create an invalid person, and validation will fail | def test_basic_invalid_person():
bob = Person("Bob B. Johnson")
bob.validate()
try:
bob.name = None
assert not bob.validate()
except ValidationError:
pass | [
"def test_create_person_bad_age(self):\n url = reverse('person-all')\n data = {\"name\": \"Test name\",\n \"age\": \"SORRY\",\n \"address\": \"Test address\",\n \"work\": \"Test work\"\n }\n response = self.client.post(url, data, forma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that we can add contact information | def test_add_contact_information():
bob = Person("John Q. Public, Esq.",
gender="male", image="http://example.com/john.jpg",
summary="Some person")
bob.validate()
bob.add_contact_detail(type='voice',
value='876-5309',
n... | [
"def test_create_contact(self):\n response = self.cont1.create_contact(self.contact_list)\n self.assertTrue(response['message'], \"Contact successfuly created\")",
"def test_auth_create_participant_contact(self):\n pass",
"def test_update_contact(self):\n self.cont1.create_contact(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the latest jupyter kernel connection file. | def _read_latest_connection_file(cls):
runtime_dir = jupyter_runtime_dir()
files = glob.glob(os.path.join(runtime_dir, 'kernel-*.json'))
if len(files) == 0:
return None
# use the latest connection file
connection_file = max(files, key=os.path.getctime)
with o... | [
"def get_jupyter_notebook_info():\n def get(url):\n req = urllib.request.Request(url, headers={'content-type': 'application/json'})\n response = urllib.request.urlopen(req)\n return json.loads(response.read())\n\n kernel_id = re.search('kernel-(.*).json',\n ipyker... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes value to the left and to the right of t according to the `endpoints`. Alpha is the fraction of distance from left endpoint to right endpoint that t has covered. | def linear_interpolation(left_value: float, right_value: float, alpha: float):
return left_value + alpha * (right_value - left_value) | [
"def integer_interpolate(\n start: T,\n end: T,\n alpha: float\n) -> tuple[int, float]:\n if alpha >= 1:\n return (end - 1, 1.0)\n if alpha <= 0:\n return (start, 0)\n value = int(interpolate(start, end, alpha))\n residue = ((end - start) * alpha) % 1\n return (value, residue)"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
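A quick worked check of `linear_interpolation` from the row above: with alpha = 0.25 the result sits a quarter of the way from the left value to the right value.

```python
def linear_interpolation(left_value: float, right_value: float, alpha: float):
    return left_value + alpha * (right_value - left_value)

print(linear_interpolation(0.0, 10.0, 0.25))  # 2.5
print(linear_interpolation(0.0, 10.0, 1.0))   # 10.0 -- alpha=1 lands on the right value
```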
Linear interpolation between initial_p and final_p over schedule_timesteps. After this many timesteps pass final_p is returned. | def __init__(self, schedule_timesteps: int, final_p: float, initial_p=1.0):
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p | [
"def lerp(initial, final, progress):\n return initial * (1 - progress) + final * progress",
"def linear_schedule(\n init_value: Scalar,\n end_value: Scalar,\n transition_steps: int,\n transition_begin: int = 0,\n) -> Schedule:\n return polynomial_schedule(\n init_value=init_value,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
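The constructor above only stores the endpoints. In the conventional baselines-style API (an assumption; the method itself is not shown in the row), the class also exposes a `value(t)` method that interpolates and clamps at `final_p` once `schedule_timesteps` have passed:

```python
class LinearSchedule:
    def __init__(self, schedule_timesteps: int, final_p: float, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t: int) -> float:
        # Fraction of the schedule completed, capped at 1 so that
        # final_p is returned after schedule_timesteps have passed.
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)

schedule = LinearSchedule(schedule_timesteps=100, final_p=0.1)
print(schedule.value(0), schedule.value(50), schedule.value(1000))  # 1.0 0.55 0.1
```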
This method specifies the frame for the OdbDisplay object. | def setFrame(self, frame: OdbFrame):
pass | [
"def set_frame(self, frame=\"CELESTIAL_MEAN_OF_DATE\"):\n self.set_abstract_item(\"Initial Bulletin\", \"Frame\", frame)",
"def frame_edit(self, frame):\n return frame",
"def set_frame(self, frame):\n self.frame = frame",
"def _set_frame(self):\n rectangle(self._canvas, self._plot_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method specifies the section point for the current primary, symbol and status variables. | def setPrimarySectionPoint(self, sectionPoint: dict, activePly: str):
pass | [
"def get_section(section):",
"def get_section(section_index):\n return lp_start_end_data[section_index]",
"def _section_offset(self, n):\r\n return self['e_shoff'] + n * self['e_shentsize']",
"def getSection(self):\n return self.getSegment().getSectionAtAddress(self.getEntryPoint())",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method creates a ViewCut object. Notes | def ViewCut(
self,
name: str,
shape: SymbolicConstant,
origin: tuple,
normal: typing.Union[SymbolicConstant, float],
axis2: typing.Union[SymbolicConstant, float],
csysName: str,
cylinderAxis: typing.Union[SymbolicConstant, float],
followDeformation... | [
"def cut(self):\n self.focus()\n self.dispatch('Cut')\n return self",
"def to_cut(self):\n from lhotse.cut import MonoCut, MultiCut\n\n cls = MonoCut if self.num_channels == 1 else MultiCut\n return cls(\n id=self.id,\n start=0.0,\n durati... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the filename and line number for stack output. | def __parse_stack_info(stack_info):
caller = getframeinfo(stack_info[1][0])
return caller.filename + ":" + str(caller.lineno) | [
"def __line__():\n import inspect\n frame = inspect.stack()[1][0]\n return inspect.getframeinfo(frame).lineno",
"def error_source():\n import traceback\n try:\n # return filename and lineno\n # context and content are also available\n import sys\n exc_cls, exc, tb = sys.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the formatter to either a custom format or the default format. | def __set_formatter(self, log_format=None, default=False):
if not default:
self.stream_handler.setFormatter(MyFormatter(log_format))
if self.file_path:
self.file_handler.setFormatter(MyFormatter(log_format))
else:
self.stream_handler.setFormatter(MyFor... | [
"def setFormatter(self, fmt):\r\n pass",
"def change_formatter(self, formatter):\n self.num_format=format.as_formatter(formatter)\n self.show_value()",
"def change_formatter(self, formatter):\n self.num_format=format.as_formatter(formatter)\n self.set_value(None)",
"def _set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We are minimizing all three structures, then checking the potential energy using GB forcefields. We could, alternatively, minimize the docked structure and then extract trajectories (1 frame long), more like a 1-trajectory MMGBSA. output is the path used in "RunDocking". It has a metric.csv file. | def RunMinimization(build_path, outpath, one_traj=False):
from . import minimize
success = True
try:
rec_energy = minimize.MinimizedEnergy(f'{build_path}/apo')
lig_energy = minimize.MinimizedEnergy(f'{build_path}/lig')
com_energy = minimize.MinimizedEnergy(f'{build_path}/com')
... | [
"def main():\n # Efforts to obtain name of pwscf input file for given md run\n try:\n pw_input = sys.argv[1]\n except IndexError:\n try:\n pw_input = glob.glob('input.*.pw')[0]\n except:\n print '\\nMake sure you are in the proper directory, exiting now...\\n'\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a Diffie-Hellman key pair (public, private), given a cyclic group defined by modulus p and generator g | def gen_DH_keys(p=DH_P, g=DH_G):
private = randbelow(2**256) % p
public = pow(g, private, p)
return public, private | [
"def diffie_hellman(p: Prime, g: Prime) -> Keys:\n # create private keys for alice and bob\n a = PrivateKey.generate_private_key()\n b = PrivateKey.generate_private_key()\n\n # generate public keys by mixing primes\n A = pow(g, a, p) # share this with Bob\n B = pow(g, b, p) # share this with Ali... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
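A toy round trip for `gen_DH_keys`: each party raises the other's public key to its own private exponent and arrives at the same shared secret, since (g^a)^b = (g^b)^a mod p. The sketch assumes `randbelow` comes from the `secrets` module, and uses a tiny modulus purely for illustration (the real code defaults to the module-level `DH_P`/`DH_G` constants):

```python
from secrets import randbelow

def gen_DH_keys(p, g):
    private = randbelow(2**256) % p
    public = pow(g, private, p)
    return public, private

p, g = 37, 5                      # toy group for illustration only
A_pub, a_priv = gen_DH_keys(p, g)
B_pub, b_priv = gen_DH_keys(p, g)

# Each side combines its own private key with the other's public key.
assert pow(B_pub, a_priv, p) == pow(A_pub, b_priv, p)
```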
Generate public and private keys for N-bit RSA, using the public exponent e. Each key is returned as a tuple (e/d, n), where e/d is the public/private exponent, and n is the modulus. If strong is True, we'll use PyCrypto's getStrongPrime function, which requires N > 512 and that N be a multiple of 128. | def gen_RSA_keys(N=1024, e=3, strong=True):
if strong:
prime_func = lambda x: getStrongPrime(x//2, e=e)
else:
prime_func = lambda x: getPrime(x//2)
good = False
while not good:
try:
p, q = prime_func(N), prime_func(N)
n = p*q
totient = (p-1)*(q... | [
"def generate_keypair(bits):\n p = generate_prime(bits // 2)\n # print(p)\n q = generate_prime(bits // 2)\n # print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)",
"def rsa_keys(p: int = None, q: int = None, e: int = 3) -> RSA_Keys:\n\n if not p or p <= 1:\n p = matasano.ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
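The key arithmetic is easier to see with toy primes (never this small in practice); Python 3.8+'s pow(e, -1, m) stands in for the modinv helper this function presumably calls after the truncation point.

p, q = 11, 17
n = p * q                      # modulus, 187
totient = (p - 1) * (q - 1)    # 160; the retry loop above draws new primes if gcd(e, totient) != 1
e = 3
d = pow(e, -1, totient)        # private exponent, 107
assert (e * d) % totient == 1
public, private = (e, n), (d, n)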
Process data using the given RSA key (exponent, modulus). Input data can be an int or bytes-like (interpreted as big-endian). We'll return either an int or bytes, respectively. | def cipher_RSA(data, key):
byte_input = False
if isinstance(data, bytes) or isinstance(data, bytearray):
        byte_input = True
data = int.from_bytes(data, 'big')
cipher = pow(data, key[0], key[1])
if byte_input:
cipher = int_to_bytes(cipher)
return cipher | [
"def RSA_Encryption(data, public_key):\n \n pubKey = str_to_RSAKey(public_key)\n encryptor = PKCS1_OAEP.new(pubKey)\n encrypted = encryptor.encrypt(data)\n return encrypted",
"def decode_pkcs8_public(cls, alg_params, key_data):\n\n # pylint: disable=unused-argument\n\n return (key_dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
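An encrypt/decrypt roundtrip with the toy key from the sketch above; since cipher_RSA is just modular exponentiation, the same operation serves both directions. The int_to_bytes call above is a project helper, for which int.to_bytes is the stdlib equivalent.

public, private = (3, 187), (107, 187)                # toy (e, n) and (d, n)
message = 42
ciphertext = pow(message, public[0], public[1])       # what cipher_RSA computes
recovered = pow(ciphertext, private[0], private[1])
assert recovered == message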
Find the nth root of an integer x using binary search. This should work even if the integer is too large to convert to float (i.e., if pow(x, 1/n) fails). | def invpow(x, n):
"""Find brackets [N, 2*N] which contain the root"""
high = 1
while high**n <= x:
high *= 2
low = high // 2
while low < high:
mid = (low + high) // 2
mid_pow = mid**n
if low < mid and mid_pow < x:
low = mid
elif high > mid and mi... | [
"def findRoot(x, power, epsilon):\n if x < 0 and power%2 == 0: #Negative number has no even-powered \n #roots\n return None\n low = min(-1.0, x)\n high = max(1.0, x)\n ans = (high + low)/2.0\n while abs(ans**power - x) >= epsilon:\n if ans**power < x:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
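A usage sketch, assuming the truncated branch of the loop returns mid once mid**n == x: exact integer roots like this are what low-exponent RSA attacks need (e.g., cube-rooting an e=3 ciphertext), where float-based pow would lose precision.

x = 987654321 ** 3            # far beyond exact float precision at real key sizes
assert invpow(x, 3) == 987654321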
Generate DSA public/private keys, using moduli p/q and generator g | def gen_DSA_keys(p=DSA_P, q=DSA_Q, g=DSA_G):
private = randbelow(q)
public = pow(g, private, p)
return public, private | [
"def gen_DH_keys(p=DH_P, g=DH_G):\n private = randbelow(2**256) % p\n public = pow(g, private, p)\n return public, private",
"def ssh_keygen():\n dsa_filename = os.path.join(remote_home(), '.ssh', 'id_dsa')\n run(\"ssh-keygen -t dsa -P '' -f %s\" % dsa_filename)\n remote_pubkey()",
"def genera... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
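A sketch of how valid toy domain parameters relate (the real ones are the large DSA_P/DSA_Q/DSA_G constants assumed above): q must divide p - 1, and g is h raised to (p-1)//q so that it generates a subgroup of order q.

from secrets import randbelow

p, q = 23, 11                 # 11 divides p - 1 = 22
h = 2
g = pow(h, (p - 1) // q, p)   # g = 4 here
assert g != 1 and pow(g, q, p) == 1   # g generates a subgroup of order q

private = randbelow(q)
public = pow(g, private, p)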
Produce a DSA signature tuple for a given message hash (as int), given a private key, moduli p/q and generator g | def sign_DSA(message_hash, private, p=DSA_P, q=DSA_Q, g=DSA_G):
k = randbelow(q)
r = pow(g, k, p) % q
k_inv = modinv(k, q)
s = k_inv*(message_hash+private*r) % q
return (r, s) | [
"def dsa_sign(\n message: bytes,\n private: DSA_Priv,\n hash_f=matasano.hash.SHA256,\n hash_to_int=DSA_hash_to_int\n) -> DSA_Signature:\n x, p, q, g = private\n\n r = 0\n s = 0\n\n digest = hash_to_int(hash_f(message))\n\n while s == 0:\n k = random.randint(1, q - 1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if a given DSA signature and public key match a message hash | def verify_DSA(message_hash, signature, public, p=DSA_P, q=DSA_Q, g=DSA_G):
r, s = signature
if not (0 < r < q) or not (0 < s < q):
raise ValueError('Invalid signature values')
s_inv = modinv(s, q)
u1 = s_inv*message_hash % q
u2 = s_inv*r % q
mod1 = pow(g, u1, p)
mod2 = pow(public... | [
"def verify(hash, signature, key_path=\"~/.ssh/ida_rsa\"):\n key = open(expanduser(key_path), \"r\").read()\n rsakey = RSA.importKey(key) \n pubkey = key.publickey()\n return pubkey.verify(hash, b64decode(signature)) == True",
"def ecdsa_verify_hash(self, message_digest, signature):\n\t\tassert(isinst... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
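An end-to-end sign/verify roundtrip with the toy parameters p=23, q=11, g=4, exercising the same arithmetic as sign_DSA and verify_DSA; pow(x, -1, q) (Python 3.8+) stands in for the modinv helper, and the retry loop guards against the zero r/s cases the functions above leave to chance.

from secrets import randbelow

p, q, g = 23, 11, 4
private = randbelow(q - 1) + 1
public = pow(g, private, p)
message_hash = 7                      # stand-in for a real hash reduced mod q

while True:
    k = randbelow(q - 1) + 1          # per-signature nonce
    r = pow(g, k, p) % q
    s = pow(k, -1, q) * (message_hash + private * r) % q
    if r and s:
        break

# Verification: v should equal r
s_inv = pow(s, -1, q)
u1, u2 = s_inv * message_hash % q, s_inv * r % q
v = (pow(g, u1, p) * pow(public, u2, p) % p) % q
assert v == r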
Given a message hash (as int) and DSA signature tuple, and the per-user random key 'k', recover the signer's private key | def recover_DSA_private(message_hash, signature, k, q=DSA_Q):
r, s = signature
r_inv = modinv(r, q)
return r_inv*((s*k)-message_hash) % q | [
"def sign( self, hash, random_k ):\n\n G = self.public_key.generator\n n = G.order()\n k = random_k % n\n p1 = k * G\n r = p1.x()\n if r == 0: raise RuntimeError(\"amazingly unlucky random number r\")\n s = ( numbertheory.inverse_mod( k, n ) * \\\n ( hash + ( self.secret_multiplier * r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
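A numeric demo of the nonce leak with the same toy parameters: once k is known, the recovery formula inverts the signing equation and yields the private key exactly.

p, q, g = 23, 11, 4
private, k = 5, 3
message_hash = 7
r = pow(g, k, p) % q                                 # r = 7
s = pow(k, -1, q) * (message_hash + private * r) % q # s = 3
recovered = pow(r, -1, q) * (s * k - message_hash) % q
assert recovered == private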
Remove English stop words from each string in the list. This is a helper function to be applied to text before training the model | def remove_stopword(self, string_list):
stop_word_cleaned_sentences=[]
stop_words = set(stopwords.words('english'))
for string_value in string_list:
string_word_tokens = word_tokenize(string_value)
cleaned_words = [word for word in string_word_tokens if not word.lower() ... | [
"def removeStopwords(text=[], stopwords=[]):\n\ttext = [x for x in text if x.lower() not in stopwords]\n\ttext = [x for x in text if len(x) > 1]\n\treturn text",
"def stopwords_removal(self):\r\n tokenized_data=self.tokenization()\r\n stop_words = set(stopwords.words('english'))\r\n filtered_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
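A standalone version of the same cleaning step; it assumes nltk is installed and the 'stopwords' and 'punkt' corpora have been downloaded via nltk.download.

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

stop_words = set(stopwords.words('english'))

def remove_stopwords(sentences):
    cleaned = []
    for sentence in sentences:
        words = word_tokenize(sentence)
        # drop any token whose lowercase form is an English stop word
        kept = [w for w in words if w.lower() not in stop_words]
        cleaned.append(" ".join(kept))
    return cleaned

print(remove_stopwords(["This is a short example of the cleaning step"]))
# -> ['short example cleaning step']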
Train word2vec embeddings using the gensim package for both description and title | def train_word2vec_vectors(self,description_array,window_size_desc, title_array,window_size_title, seed ):
description_word_sequence = [text_to_word_sequence(text) for text in description_array]
self.description_word2vec_model = Word2Vec(description_word_sequence, window=window_size_desc, size=self.embe... | [
"def prepare_embedding_vectors(self,description_array, title_array,pretrained_emb=\"spacy\",window_size_desc=10,window_size_title=3,seed=42):\n\n #Either use pretrained embeddings downloaded from spacty or trained word2vec embedding on our data\n self.pretrained_emb_type=pretrained_emb\n if sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
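A minimal gensim training sketch with a toy corpus; note that gensim 4.x renamed the size argument used above to vector_size, and the Keras text_to_word_sequence step is replaced here by pre-split token lists.

from gensim.models import Word2Vec

sentences = [
    ["deep", "learning", "for", "text"],
    ["text", "classification", "with", "embeddings"],
]
model = Word2Vec(sentences, window=3, vector_size=16, min_count=1, seed=42)
vector = model.wv["text"]          # 16-dimensional embedding for "text"
print(vector.shape)                # (16,)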
Prepare text vectorization layers and embedding layers to be used later in the network | def prepare_preprocessing_voc_layers(self, title_array,num_voc_title, description_array,num_voc_description, author_array,max_len_title,max_len_desc):
#Prepare description text vectorization layer. The layer handles tokenization (including handling punctuation) and converting to int sequences.
self.desc... | [
"def text_net(inputs,\r\n\t\t\t feat_layers=TextboxNet.default_params.feat_layers,\r\n\t\t\t anchor_sizes=TextboxNet.default_params.anchor_sizes,\r\n\t\t\t anchor_ratios = TextboxNet.default_params.anchor_ratios,\r\n\t\t\t normalizations=TextboxNet.default_params.normalizations,\r\n\t\t\t is_training=True,\r\n\t\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
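A sketch of the kind of Keras TextVectorization layer this method builds, assuming TensorFlow 2.x; the vocabulary cap and sequence length here are toy values.

import tensorflow as tf

vectorizer = tf.keras.layers.TextVectorization(
    max_tokens=1000,            # vocabulary cap
    output_mode="int",          # integer token ids
    output_sequence_length=8,   # pad/truncate every sequence to a fixed length
)
vectorizer.adapt(["a tiny corpus of book descriptions", "another description"])
print(vectorizer(["a tiny description"]))  # shape (1, 8) int tensor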
Prepare embeddings based on pretrained spaCy vectors or word2vec training on the description/title features. Creates an embedding array that includes an embedding for each vocabulary word for description and title | def prepare_embedding_vectors(self,description_array, title_array,pretrained_emb="spacy",window_size_desc=10,window_size_title=3,seed=42):
#Either use pretrained embeddings downloaded from spacty or trained word2vec embedding on our data
self.pretrained_emb_type=pretrained_emb
if self.pretraine... | [
"def prepare_preprocessing_voc_layers(self, title_array,num_voc_title, description_array,num_voc_description, author_array,max_len_title,max_len_desc):\n #Prepare description text vectorization layer. The layer handles tokenization (including handling punctuation) and converting to int sequences.\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A decorator used to add a method as an event trigger | def register_trigger(self, event_name):
def decorator(f):
self.add_trigger(event_name, f)
return f
return decorator | [
"def event(\n arg: Optional[Union[FunctionType, str, Type[AggregateEvent]]] = None\n) -> CommandMethodDecorator:\n if arg is None:\n return event # type: ignore\n else:\n return CommandMethodDecorator(arg)",
"def add_method(cls: object):\n def decorator(func):\n @wraps(func)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
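A self-contained version of the decorator pattern, with a hypothetical EventBus class: the registry maps event names to handler lists, and the decorator returns f unchanged so the function remains directly callable.

class EventBus:
    def __init__(self):
        self.triggers = {}

    def add_trigger(self, event_name, f):
        self.triggers.setdefault(event_name, []).append(f)

    def register_trigger(self, event_name):
        def decorator(f):
            self.add_trigger(event_name, f)
            return f        # return f unchanged so it stays callable directly
        return decorator

bus = EventBus()

@bus.register_trigger("user_created")
def send_welcome(event_args):
    print("welcome,", event_args["name"])

for handler in bus.triggers["user_created"]:
    handler({"name": "ada"})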
Initiate an event. The event will be handled in a separate thread; there are no guarantees it will be executed immediately after this call. | def initiate_event(self, event_name, event_args):
self.event_queue.put_nowait({"event_name": event_name, "event_args": event_args}) | [
"def _init_events(self):\n\n self._init_events_pipe()\n\n LOG.debug(\"Starting native event thread\")\n event_thread = native_threading.Thread(target=self._native_thread)\n event_thread.setDaemon(True)\n event_thread.start()\n\n LOG.debug(\"Starting green dispatch thread\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
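A sketch of the consumer side this method implies: a daemon thread drains the queue and dispatches events, which is why put_nowait carries no immediate-execution guarantee.

import queue
import threading

event_queue = queue.Queue()

def worker():
    while True:
        event = event_queue.get()            # blocks until an event arrives
        print("handling", event["event_name"], event["event_args"])
        event_queue.task_done()

threading.Thread(target=worker, daemon=True).start()
event_queue.put_nowait({"event_name": "ping", "event_args": {"n": 1}})
event_queue.join()                           # wait until the event is handled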
Return a map of pin locations of the instance offset | def get_layout_pins(self, inst):
# find the instance
for i in self.insts:
if i.name == inst.name:
break
else:
debug.error("Couldn't find instance {0}".format(inst.name), -1)
inst_map = inst.mod.pin_map
return inst_map | [
"def mapped_pins(self):\n return self._pin_mapping",
"def _genposmap(self):\n mc = self._pos.mc\n\n rngmap = np.zeros(mc.shape)\n for rngind, rng in enumerate(self._ranges):\n rng = self._ranges[rngind,:]\n # rngarray: 1 where mc matches current range, 0 where not... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts connection names to their SPICE hierarchy equivalent | def translate_nets(self, subinst_ports, port_dict, inst_name):
converted_conns = []
for conn in subinst_ports:
if conn in port_dict:
converted_conns.append(port_dict[conn])
else:
converted_conns.append("{0}{2}{1}".format(inst_name, conn, OPTS.hier_... | [
"def _get_connections_names() -> list[str]:\n\n # get the existing map\n connections_map = get_connections_map()\n return list(connections_map.keys()) or []",
"def interconnect_to_name(interconnect):\n return \"_\".join(sorted(check_and_format_interconnect(interconnect)))",
"def translate_connect_ar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_change_stream_get. Create a change stream. | def test_team_builder_config_product_groups_change_stream_get(self):
pass | [
"def test_team_builder_config_product_groups_change_stream_post(self):\n pass",
"def test_portals_change_stream_get(self):\n pass",
"def test_template_permission_sets_change_stream_get(self):\n pass",
"def setup_streams(input_streams, trigger_stream_name, gripper_stream_name):\n in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_change_stream_post. Create a change stream. | def test_team_builder_config_product_groups_change_stream_post(self):
pass | [
"def test_team_builder_config_product_groups_change_stream_get(self):\n pass",
"def test_portals_change_stream_post(self):\n pass",
"def test_template_permission_sets_change_stream_post(self):\n pass",
"def test_team_builder_config_product_groups_post(self):\n pass",
"def setup_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_count_get. Count instances of the model matched by where from the data source. | def test_team_builder_config_product_groups_count_get(self):
pass | [
"def test_product_count(self):\n\n # test that enpoint returns the correct count of products\n rv = self.app.get('/product/count',\n headers=self.headers,\n content_type='application/json')\n data = json.loads(rv.get_data(as_text=True))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_find_one_get. Find first instance of the model matched by filter from the data source. | def test_team_builder_config_product_groups_find_one_get(self):
pass | [
"def test_team_builder_config_product_groups_id_product_group_get(self):\n pass",
"def test_team_builder_config_product_groups_id_get(self):\n pass",
"def test_team_builder_config_product_groups_id_exists_get(self):\n pass",
"def test_team_builder_config_product_groups_get(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_get. Find all instances of the model matched by filter from the data source. | def test_team_builder_config_product_groups_get(self):
pass | [
"def test_team_builder_config_product_groups_id_product_group_get(self):\n pass",
"def test_team_builder_config_product_groups_find_one_get(self):\n pass",
"def test_team_builder_config_product_groups_id_get(self):\n pass",
"def test_team_builder_config_product_groups_id_builder_config_ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_id_builder_config_get. Fetches belongsTo relation builderConfig. | def test_team_builder_config_product_groups_id_builder_config_get(self):
pass | [
"def test_team_builder_config_product_groups_find_one_get(self):\n pass",
"def test_team_builder_config_product_groups_id_get(self):\n pass",
"def test_team_builder_config_product_groups_id_product_group_get(self):\n pass",
"def test_team_builder_config_product_groups_get(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for team_builder_config_product_groups_id_delete. Delete a model instance by {{id}} from the data source. | def test_team_builder_config_product_groups_id_delete(self):
pass | [
"def test_delete_groups_id(self):\n pass",
"def deleteGroup(id):",
"def test_delete_model_by_id(self):\n pass",
"def test_delete_device_group(self):\n pass",
"def test_prospects_delete_by_id(self):\n pass",
"def test_delete_groups_id_memberships(self):\n pass",
"def te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |