query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list, 19–20 items) | metadata (dict)
|---|---|---|---|
Download the Faasm runtime files | def download_runtime(ctx, nocodegen=False):
url = get_runtime_url()
tar_name = get_runtime_tar_name()
tar_path = get_runtime_tar_path()
# Clear out existing
if exists(FAASM_RUNTIME_ROOT):
print("Removing existing")
rmtree(FAASM_RUNTIME_ROOT)
# Download the bundle
print("Dow... | [
"def download_isos(self):\n\t\tself.__download_forcing()\n\t\tself.__download_forecast()\n\t\tself.__download_nowcast()",
"def download_assemblies(self):\n n = 0\n for name, barcode in self.__barcodes.items():\n # Put the assembly barcode into an URL for database search\n url =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints the version of various LLVM tools | def version(ctx):
bin_dir = join(TOOLCHAIN_INSTALL, "bin")
for exe in ["clang", "clang++", "llvm-ar", "wasm-ld"]:
bin_path = join(bin_dir, exe)
print("---- {} ----".format(exe))
call("{} --version".format(bin_path), shell=True)
print("") | [
"def show_version():\n return _run_speedify_cmd([\"version\"])",
"def get_system_version_info() -> str:\n output_template = '{:<12} {}'\n line_separator = '-' * 60\n not_found_str = '[Not Found]'\n out_lines = []\n\n # System (Python, OS)\n out_lines += ['System Version Info', line_separator]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copies the toolchain from a remote host | def scp_toolchain(ctx, user, host):
_scp_dir(user, host, "toolchain") | [
"def scp_sysroot(ctx, user, host):\n _scp_dir(user, host, \"llvm-sysroot\")",
"def FetchAndInstall(self, arch):\n # Fist get the URL for this architecture\n col = terminal.Color()\n print col.Color(col.BLUE, \"Downloading toolchain for arch '%s'\" % arch)\n url = self.LocateArchUrl(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copies the sysroot from a remote host | def scp_sysroot(ctx, user, host):
_scp_dir(user, host, "llvm-sysroot") | [
"def pull():\n\n buildout_directory = _env.hostout.options.get('path')\n fallback_user = _env.user or 'root'\n effective_user = _env.hostout.options.get('effective-user', fallback_user)\n local_sudo = _env.hostout.options.get('local-sudo') == \"true\"\n\n assert buildout_directory, u'No path found fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method vacates the target file if it exists, and removes the backup file if it exists. | def vacate_target_if_exist_and_remove_backup_if_exist(file_path) -> None:
if file_path.target.is_file():
os.replace(str(file_path.target), str(file_path.backup_for_test))
if file_path.backup.exists():
os.unlink(str(file_path.backup)) | [
"def deleteAutoSaveFile(self):\n filePath = self.filePath + '~'\n if self.filePath and os.path.exists(filePath):\n try:\n os.remove(filePath)\n except OSError:\n QtGui.QMessageBox.warning(self.activeWindow, 'TreeLine',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register shell context objects. | def register_shellcontext(app):
def shell_context():
"""Shell context objects."""
return {
'app': app,
'jwt': jwt,
'db': db,
'models': models}
app.shell_context_processor(shell_context) | [
"def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'model': models\n }\n\n app.shell_context_processor(shell_context)",
"def register(self, context):\n context.register(self._type, self)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Subtract out the dimensionwise means | def remove_means(x, norm = False):
for i in range(x.shape[1]):
mu = x[:, i].mean()
x[:, i] -= mu
if norm:
for i in range(x.shape[1]):
sig = x[:, i].std()
x[:, i] /= sig
return x | [
"def demean(data):\n return data - data.mean()",
"def subtract_image_means(self):\n for data in [self.train_data, self.test_data]:\n if len(data) > 0:\n # Subtract the average pixel value from each channel.\n for i in range(self.image_info.num_channels):\n channel_avg = np.averag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
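The `remove_means` row above centers each column of a 2-D array with a per-column loop; a minimal vectorized sketch of the same idea (the input array here is made up for illustration) is:

```python
import numpy as np

# Hypothetical data: 100 samples, 3 features (illustration only)
x = np.random.default_rng(0).normal(loc=5.0, size=(100, 3))

# Subtract the per-column (dimension-wise) means in one step
x_centered = x - x.mean(axis=0)

# The optional norm branch additionally divides by the per-column std
x_normed = x_centered / x_centered.std(axis=0)

print(np.allclose(x_centered.mean(axis=0), 0.0))  # True
```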
Whitens using svd from np.linalg | def whiten(x):
covar = np.cov(x[:-dt, :].T)
u, s, vh = np.linalg.svd(covar)
covar_sqinv = u @ np.diag(np.sqrt(1/s)) @ vh
xw = x @ covar_sqinv.T
# xw = u @ vh
xw.astype(x.dtype)
return xw | [
"def svdinv(X, Nweights=None, factor=1E3, verbose=False):\n import scipy as sp\n import scipy.linalg as sl\n U, s, Vh = sl.svd(X)\n # U is mxm, s has length n,\n # and Vh (in general the Hermitian conjugate of V) is nxn.\n m = len(U)\n n = len(s)\n # Create the pseudo-inverse of S,\n # wh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
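The `whiten` document above references an external `dt` variable when forming the covariance; a self-contained sketch of SVD whitening (assuming `dt` merely trims trailing samples and can be dropped for illustration, with made-up correlated data) is:

```python
import numpy as np

rng = np.random.default_rng(0)
# Correlated toy data (illustration only)
x = rng.normal(size=(500, 3)) @ np.array([[2.0, 0.5, 0.0],
                                          [0.0, 1.0, 0.3],
                                          [0.0, 0.0, 0.7]])

# covar = u @ diag(s) @ vh, so its inverse square root is
# u @ diag(1/sqrt(s)) @ vh (covar is symmetric positive definite)
covar = np.cov(x.T)
u, s, vh = np.linalg.svd(covar)
covar_sqinv = u @ np.diag(1.0 / np.sqrt(s)) @ vh

xw = x @ covar_sqinv.T

# The whitened data has (numerically) identity covariance
print(np.allclose(np.cov(xw.T), np.eye(3)))  # True
```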
Runs PCA on the dataset, but only after the first start_cutoff datapoints | def pcaify(dataset):
pca = PCA()
pca_latent = pca.fit_transform(dataset.data[start_cutoff:])
return pca_latent | [
"def fselect_pca(self,proc):\n from sklearn.decomposition import PCA\n pca=PCA(n_components=proc).fit(self.X1)\n self.X1=pca.transform(self.X1)\n self.X2=pca.transform(self.X2)\n if self.X3:\n self.X3=pca.transform(self.X3)",
"def calculate_pca(data_set):\n transfo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up the validation data loader via a Dictlike object. | def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl ... | [
"def load_data(self, data=None):\n\t\tif data is None:\n\t\t\tself.data = load_data(self.path)\n\t\telse:\n\t\t\tself.data = data\n\n\t\tfrom . import validate_manifest\n\t\t# Validate the manifest with the base validation function in __init__\n\t\tvalidate_manifest(self.data, self.schema_validator, self.validation... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if link already added and if the link is valid. | def validate_link(link):
pass | [
"def checkLinkExists(self, soup, link):\n return bool(soup.find(\"a\", href=link))",
"def test_add_additional_URL_alreadythere(self):\n log_new_case(\"test_add_additional_URL_alreadythere\")\n\n # Test variables\n testhandle = self.handle_withloc\n url = 'http://first.foo'\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enriches the media objects referenced in a single item. First, a media item will be retrieved from the source, than the registered and configured tasks will run. In case fetching the item fails, enrichment of the media item will be skipped. In case a specific media enrichment task fails, only that task is skipped, whic... | def enrich_item(self, item):
try:
identifier = strip_scheme(item.identifier_url)
except AttributeError:
raise Exception('No identifier_url for item: %s', item)
try:
date_modified = item.date_modified
except AttributeError:
date_modified =... | [
"def process(self, media):\n raise NotImplementedError()",
"async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:\n _, camera_id, event_id = async_parse_identifier(item)\n url = self.events[camera_id][event_id][\"media_url\"]\n return PlayMedia(url, MIME_TYPE)",
"a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function trains a predictor using FTRidge. It runs over different values of tolerance and trains predictors on a training set, then evaluates them using a loss function on a holdout set. R is Phi (evaluation of basis functions); Ut is f (simulation of the derivatives). | def TrainFTRidge(R0, Ut, tol, lam, eta, maxit = 200, FTR_iters = 10, l0_penalty = None, normalize = 0, split = 0.8,
print_best_tol = False, plot_loss = False):
n,d = R0.shape
R = np.zeros((n,d), dtype=np.float32)
if normalize != 0:
Mreg = np.zeros(d)
for i in range(0,d):
... | [
"def Ridge(XTrain, XTest, yTrain, yTest,lamb,validate_testsize=0.2):\n\n Beta_Ridge = np.zeros((len(lamb),XTrain.shape[1])); MSE_lamb = np.zeros(len(lamb))\n\n XTraining, XValidate, yTraining, yValidate = train_test_split(XTrain,yTrain,test_size=validate_testsize)\n\n for i,lambval in enumerate(lamb):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function trains a predictor using STRidge. It runs over different values of tolerance and trains predictors on a training set, then evaluates them using a loss function on a holdout set. R is Phi (evaluation of basis functions); Ut is f (simulation of the derivatives). | def TrainSTRidge(R0, Ut, lam, eta, d_tol, maxit = 200, STR_iters = 10, l0_penalty = None, normalize = 0, split = 0.8,
print_best_tol = False, plot_loss = False):
n,d = R0.shape
R = np.zeros((n,d), dtype=np.float32)
if normalize != 0:
Mreg = np.zeros(d)
for i in range(0... | [
"def TrainFTRidge(R0, Ut, tol, lam, eta, maxit = 200, FTR_iters = 10, l0_penalty = None, normalize = 0, split = 0.8, \n print_best_tol = False, plot_loss = False):\n n,d = R0.shape\n R = np.zeros((n,d), dtype=np.float32)\n if normalize != 0:\n Mreg = np.zeros(d)\n for i in ran... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Keras Implementation of EEGNet | def EEGNet(nb_classes, Chans=32, Samples=128,
dropoutRate=0.5, kernLength=int(64 / 2), F1=8,
D=2, F2=16, norm_rate=0.25, dropoutType='Dropout'):
K.set_image_data_format('channels_first')
if dropoutType == 'SpatialDropout2D':
dropoutType = SpatialDropout2D
... | [
"def EEGNet(nb_classes, Chans=8, Samples=250,\n dropoutRate=0.5, kernLength=125, F1=7,\n D=2, F2=7, norm_rate=0.25, dropoutType='Dropout'):\n\n if dropoutType == 'SpatialDropout2D':\n dropoutType = SpatialDropout2D\n elif dropoutType == 'Dropout':\n dropoutType = Dropout\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return array of slit_spatId (MultiSlit, IFU) or ech_order (Echelle) values | def slitord_id(self):
if self.pypeline in ['MultiSlit', 'IFU']:
return self.spat_id
if self.pypeline == 'Echelle':
return self.ech_order
msgs.error(f'Unrecognized Pypeline {self.pypeline}') | [
"def getSecurities(self):\n\n exchange = {0:'NASDAQ', 1:'NYSE', 2:'ASE', 6:'OTC'}\n\n # Request number of securities in database\n if not self.sock.send('\\3'):\n print \"send 3 error\"\n self.close()\n return False\n\n ninfo = unpack('I',self.RecvAll(siz... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the length of each slit in pixels. By default, the method will return the tweaked slit lengths if they have been defined. If they haven't been defined the | def get_slitlengths(self, initial=False, median=False):
left, right, _ = self.select_edges(initial=initial)
slitlen = right - left
if median is True:
slitlen = np.median(slitlen, axis=1)
return slitlen | [
"def stimuliLength(self):\r\n return len(self.stimuli)",
"def get_size():\n l = self.linfeats.get_size()\n return 2*l + (l *(l-1)) / 2",
"def _slit_width(self):\n return self.phu.get(self._keyword_for('slit_width'))",
"def count_lit_pixels(self):\n return sum(sum(row) for ro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select between the initial or tweaked slit edges and allow for flexure correction. By default, the method will return the tweaked slits if they have been defined. If they haven't been defined the nominal | def select_edges(self, initial=False, flexure=None):
# TODO: Add a copy argument?
if self.left_tweak is not None and self.right_tweak is not None and not initial:
left, right = self.left_tweak, self.right_tweak
else:
left, right = self.left_init, self.right_init
... | [
"def slit_img(self, pad=None, slitidx=None, initial=False, \n flexure=None,\n exclude_flag=None, use_spatial=True):\n #\n if slitidx is not None and exclude_flag is not None:\n msgs.error(\"Cannot pass in both slitidx and exclude_flag!\")\n # Check the... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Construct an image identifying each pixel with its associated slit. The output image has the same shape as the original trace image. Each pixel in the image is set to the index of its associated slit (i.e, the pixel value is | def slit_img(self, pad=None, slitidx=None, initial=False,
flexure=None,
exclude_flag=None, use_spatial=True):
#
if slitidx is not None and exclude_flag is not None:
msgs.error("Cannot pass in both slitidx and exclude_flag!")
# Check the input
... | [
"def current_trace_locations(self):\n edge_img = np.zeros((self.nspec, self.nspat), dtype=int)\n if self.is_empty:\n return edge_img\n i = np.tile(np.arange(self.nspec), (self.ntrace,1)).T.ravel()\n edge_img[i, self.edge_img.ravel()] = np.tile(self.traceid, (self.nspec,1)).rav... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Generate an image with the normalized spatial coordinate within each slit. | def spatial_coordinate_image(self, slitidx=None, full=False, slitid_img=None,
pad=None, initial=False, flexure_shift=None):
# Slit indices to include
_slitidx = np.arange(self.nslits) if slitidx is None else np.atleast_1d(slitidx).ravel()
if full and len(_slitidx... | [
"def plot_latent_images(model, n, digit_size=128):\n\n norm = tfp.distributions.Normal(0, 1)\n grid_x = norm.quantile(np.linspace(0.05, 0.95, n))\n grid_y = norm.quantile(np.linspace(0.05, 0.95, n))\n image_width = digit_size*n\n image_height = image_width\n image = np.zeros((image_height, image_width))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the object positions expected by the slitmask design | def get_maskdef_objpos(self, plate_scale, det_buffer):
# midpoint in the spectral direction
specmid = self.left_init[:,0].size//2
# Unpack -- Remove this once we have a DataModel
obj_maskdef_id = self.maskdef_designtab['MASKDEF_ID'].data
# Distance (arcsec) of the object from t... | [
"def test_get_position(self):\n iv1 = GenomicSegment(\"chrA\", 100, 150, \"+\")\n iv2 = GenomicSegment(\"chrA\", 150, 200, \"+\")\n iv3 = GenomicSegment(\"chrA\", 250, 350, \"+\")\n\n mask = GenomicSegment(\"chrA\", 50, 125, \"+\")\n non_overlap_mask = GenomicSegment(\"chrA\", 400... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the Slitmask offset (pixels) from position expected by the slitmask design | def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, bright_maskdefid,
snr_thrshd, use_alignbox, dither_off=None):
if self.maskdef_objpos is None:
msgs.error('An array of object positions predicted by the slitmask design must be provided.')
i... | [
"def calc_sag_offset_idx(self):\n return self.offset_pnt-1",
"def average_maskdef_offset(calib_slits, platescale, list_detectors):\n\n calib_slits = np.array(calib_slits)\n if list_detectors is None:\n msgs.warn('No average slitmask offset computed')\n return calib_slits\n\n # unpack... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method determines the fwhm to use for the optimal extraction of maskdef_extract (i.e., undetected) objects. If the user provides a fwhm, it would be used. Otherwise fwhm will be computed using the average fwhm of the detected objects. | def get_maskdef_extract_fwhm(self, sobjs, platescale, fwhm_parset, find_fwhm):
msgs.info('Determining the FWHM to be used for the optimal extraction of `maskdef_extract` objects')
fwhm = None
if fwhm_parset is not None:
msgs.info(f'Using user-provided FWHM = {fwhm_parset}"')
... | [
"def get_fwhm(image, fwxm=0.5, upsampling=1):\n # compute weighted moments to get centroid\n x0,y0=np.unravel_index(np.argmax(image), image.shape)\n # x0 = numpy.floor(moments.x0)\n # y0 = numpy.floor(moments.y0)\n\n profile_x = image[int(x0), :]\n profile_y = image[:, int(y0)]\n\n max_val = im... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loop around all the calibrated detectors to extract information on the object positions expected by the slitmask design and the offsets between the expected and measure slitmask position. This info is recorded in the `SlitTraceSet` datamodel. | def get_maskdef_objpos_offset_alldets(sobjs, calib_slits, spat_flexure, platescale, det_buffer, slitmask_par,
dither_off=None):
# grab corresponding detectors
calib_dets = np.array([ss.detname for ss in calib_slits])
for i in range(calib_dets.size):
if calib_sl... | [
"def average_maskdef_offset(calib_slits, platescale, list_detectors):\n\n calib_slits = np.array(calib_slits)\n if list_detectors is None:\n msgs.warn('No average slitmask offset computed')\n return calib_slits\n\n # unpack list_detectors\n blue_and_red = list_detectors.ndim > 1\n spect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loop around all the calibrated detectors to compute the median offset between the expected and measure slitmask position. This info is recorded in the `SlitTraceSet` datamodel. | def average_maskdef_offset(calib_slits, platescale, list_detectors):
calib_slits = np.array(calib_slits)
if list_detectors is None:
msgs.warn('No average slitmask offset computed')
return calib_slits
# unpack list_detectors
blue_and_red = list_detectors.ndim > 1
spectrograph_dets =... | [
"def measureSlitOffsets(self, detectorMap, lines, select, weights):\n sysErr = self.config.soften\n numFibers = len(detectorMap)\n fiberId = lines.fiberId\n xy = np.full((len(lines), 2), np.nan, dtype=float)\n isTrace = lines.description == \"Trace\"\n notTrace = ~isTrace\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loop around all the calibrated detectors to assign RA, DEC and OBJNAME to extracted object and to force extraction of undetected objects. | def assign_addobjs_alldets(sobjs, calib_slits, spat_flexure, platescale, slitmask_par, find_fwhm):
# grab corresponding detectors
calib_dets = np.array([ss.detname for ss in calib_slits])
for i in range(calib_dets.size):
msgs.info('DET: {}'.format(calib_dets[i]))
# Assign RA,DEC, OBJNAME to... | [
"def __call__(self,detections):\n \n # 1. predict new locations of all objects x_k | x_k-1\n for obj in self.active_objs:\n obj.predict()\n \n # 2. look at next set of detected objects - all objects are included in this even if detached\n # convert into numpy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle GET requests for single product type | def retrieve(self, request, pk=None):
try:
product_type = ProductType.objects.get(pk=pk)
serializer = ProductTypeSerializer(product_type, context={'request': request})
return Response(serializer.data)
except Exception as ex:
return HttpResponseServerError(... | [
"def get_product(self, data):\n payload = {}\n payload.update(self.generic_service)\n payload.update(self.product_service)\n\n r = requests.get(\"http://catalog.bizrate.com/services/catalog/v1/us/{0}\".format(\"product\"), params=payload)\n print(\"URL: \")\n print(r.url)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle PUT requests for a product type | def update(self, request, pk=None):
product_type = ProductType.objects.get(pk=pk)
product_type.name = request.data["name"]
product_type.save()
return Response({}, status=status.HTTP_204_NO_CONTENT) | [
"def put(self, product_id: str) -> Response:\n data = request.get_json()\n put_user = Products.objects(id=product_id).update(**data)\n return jsonify({\"result\": put_user})",
"def edit_product(req):\n\n name = req.get('name', \"\")\n promo_category_id = req.get('promo_category_id', Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle DELETE requests for a single product type | def destroy(self, request, pk=None):
try:
product_type = ProductType.objects.get(pk=pk)
product_type.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
except ProductType.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=st... | [
"def delete(request):\n if request.method == \"POST\":\n if request.POST['type'] == 'file':\n uri = _download_url_to_ofs_url(request.POST['uri'])\n data = tsc.query_data([\"uri\", \"eq\", uri], limit=1, single=True)\n if data:\n resp = tsc.delete(data.id)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle GET requests to product types resource | def list(self, request):
types = ProductType.objects.all()
includeproducts = self.request.query_params.get('includeproducts', None)
if includeproducts is not None:
for product_type in types:
related_products = Product.objects.filter(product_type=product_type)[:3]
... | [
"def list(self):\n return self.call('catalog_product_type.list', [])",
"def get_products():",
"def get_product(self, data):\n payload = {}\n payload.update(self.generic_service)\n payload.update(self.product_service)\n\n r = requests.get(\"http://catalog.bizrate.com/services/c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dispatches a hook dictionary on a given piece of data. | def dispatch_hook(key, hooks, hook_data, **kwargs):
... | [
"def runHooks (c):\n info = D3PDObject._hookArgsStore.get (c.getName())\n if not info: return\n (hooks, args, hookargs) = info\n for h in hooks:\n h (c,\n *args,\n **hookargs\n )\n return",
"def call_hook(self, hook, *args, **... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
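The `dispatch_hook` body is elided above ("..."); as a hedged sketch only (not necessarily the elided implementation), a hook dispatcher of this shape typically looks up the key in a hooks mapping and threads the data through each callback:

```python
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Call all hooks registered under `key`, passing the data through."""
    hooks = hooks or {}
    registered = hooks.get(key, [])
    if callable(registered):          # allow a single bare callable
        registered = [registered]
    for hook in registered:
        result = hook(hook_data, **kwargs)
        if result is not None:        # a hook may replace the data
            hook_data = result
    return hook_data
```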
Load the glove embedding using gensim's word2vec api | def load_glove_embeddings(self):
glove_embedding_location = "./data/word2vec/w2v.twitter.txt"
print("Loading glove embeddings...")
glove_model = models.KeyedVectors.load_word2vec_format(glove_embedding_location, binary=False)
return glove_model | [
"def load_word2vec_embeddings(path):\n en_model = KeyedVectors.load_word2vec_format(path, binary=True)\n embeddings_index = en_model.wv\n all_embeddings = np.stack(embeddings_index.syn0)\n del en_model\n embedding_mean,embedding_std = all_embeddings.mean(), all_embeddings.std()\n return embeddings... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the glove embedding for a single word | def glove_embedding_for_word(self, word):
return self.glove_model[word] | [
"def get_embedding(word, nlp):\n token = nlp(word)\n return token.vector",
"def embedding(self, sentence, word):\n encoded = self.tokenizer.encode_plus(sentence, return_tensors=\"pt\")\n\n with torch.no_grad():\n output = self.model(**self._to_device(encoded))\n\n if isinstan... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the emoji embeddings | def load_emoji_embeddings(self):
emoji_embeddings_path = "./data/word2vec/emoji2vec.bin"
print("Loading Emoji Embeddings")
model = models.KeyedVectors.load_word2vec_format(emoji_embeddings_path, binary=True)
return model | [
"def load_embeddings(self):\n\n path = os.path.join(self.train_path, 'char-CNN-RNN-embeddings.pickle')\n file = open(path, 'rb')\n embeddings = pickle.load(file, encoding = 'iso-8859-1')\n embeddings = np.array(embeddings)\n #embeddings = torch.from_numpy(embeddings)\n #emb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take the average of emoji embeddings | def emoji_embeddings_for_tweets(self, tokens):
sum_vec = np.zeros(shape=(self.emoji_model.vector_size,))
emojis = 0
for token in tokens:
try:
if token.decode('utf-8') in self.emoji_model:
emojis += 1
sum_vec = sum_vec + self.emo... | [
"def words_avg_embedding(words: list, glove):\n\n word_embeddings = map(partial(get_word_vec, glove=glove), words)\n sum_words_embedding = reduce(np.add, word_embeddings)\n return sum_words_embedding / len(words)",
"def embed_avg(tgt_embed_list, pred_embed_list):\n Log.info(\"calculate the embed avg s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take the tiddlers from the given bag and inject them into a TiddlyWiki. | def list_tiddlers(self, bag):
self._prepare_twp(bag)
return self.build_non_js_version(bag) | [
"def import_wiki(store, wikitext, bagname='wiki'):\n parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('beautifulsoup'))\n soup = parser.parse(wikitext)\n store_area = soup.find('div', id='storeArea')\n divs = store_area.findAll('div')\n\n for tiddler_div in divs:\n handle_tiddler_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
tickDist = distance between ticks (boxes), len = length of rulers in boxWidths, labelDist = how many ticks between all labels | def drawRuler(self, miny, len, tickDist, labelDist):
tickHeight = 5
tickToLabelMargin = 5
self.canvas.create_line(self.minx,miny,self.minx+len*self.boxWidth,miny)
i = 0
# draw ticks and labels
#for x in range(self.minx,self.minx+len*self.boxWidth,tickDist*self.boxWidth):
... | [
"def generate_labels(self, max_label_size=50, min_label_size=10, tightness=200):\n self.labels = []\n placed = []\n padding = 5\n while tightness > 0:\n w = randrange(max_label_size - min_label_size) + min_label_size\n h = randrange(max_label_size - min_label_size) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a small frame of city data | def get_cities_frame(tc=_TkContext.implicit):
_TkContext.validate(tc)
global _cities_frame
if _cities_frame is None:
schema = zip('rank|city|population_2013|population_2010|change|county'.split('|'),
[int, str, int, int, str, str])
data = [[field for field in line.spli... | [
"def load_data(city, month, day):\n #Read in data for city\n df = pd.read_csv(city_data[city])\n #Rename blank column\n df = df.rename(columns={'Unnamed: 0':'Trip ID'})\n #Convert start time to datetime type\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #Create column for month name\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an execution environment for the corresponding env. | def _get_execution_env(environment):
if environment is None:
typename = 'LocalAsync'
else:
typename = type(environment).__name__
tracker = _mt._get_metric_tracker()
tracker.track('deploy.job.create.%s' % typename.lower(), value=1)
if typename == 'Local':
exec_env = LocalExe... | [
"def get_env(self):\n if self.config.env:\n return env.Env(self.config.env)\n config = env.EnvConfig(host='localhost')\n return env.Env(config)",
"def get_execution_environment():\n gateway = get_gateway()\n j_execution_environment = gateway.jvm.org.apache.flink.api.j... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve Yarn application state | def get_yarn_application_state(environment, app_id, silent=False):
hadoop_base = "hadoop "
if environment.hadoop_conf_dir:
hadoop_base += "--config %s " % environment.hadoop_conf_dir
hadoop_jar = environment._get_hadoop_jar()
hadoop_cmd = "%s jar %s -jar %s -checkAppId %s -... | [
"def current_state():\n current_state = app_manager.current_status()\n click.echo(current_state)",
"def status_get(self, *, is_app=False):\n return self._run('status-get', '--include-data', f'--application={is_app}')",
"def status_get(self, *, is_app=False):\n return self._run('status-get', ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs a job asynchronously in a background process. 1. Create a /tmp directory for this execution 2. Serialize the Job to disk so can be read by other process 3. Start additional process 4. Return LocalAsynchronousJob object to caller | def run_job(job):
# Process launch with a pickled Job as file path and session.location
driver_file_path = _os.path.join(_os.path.dirname(__file__), '_graphlabJob.py')
path = job._get_exec_dir()
job_path = _os.path.join(path, 'job-definition')
_os.makedirs(path)
Executi... | [
"def create_call_async_job(config, internal_storage, executor_id, job_id, func, data, extra_env=None,\n extra_meta=None, runtime_memory=None, execution_timeout=EXECUTION_TIMEOUT):\n async_job_id = f'A{job_id}'\n return _create_job(config, internal_storage, executor_id, async_job_id, f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the value for blank fields in Django 1.8 and earlier. | def _get_blank_value_18(field):
if field.null:
return None
else:
return field.value_to_string(None) | [
"def get_empty_value_display(self, field_name=None):\n return mark_safe(self.empty_value_display)",
"def get_prep_value(self, value):\n if value == \"\":\n # If Django tries to save an empty string, send the db None (NULL).\n return None\n else:\n # Otherwise,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the value for blank fields in Django 1.9 and later. | def _get_blank_value_19(field):
if field.null:
return None
else:
return '' | [
"def get_empty_value_display(self, field_name=None):\n return mark_safe(self.empty_value_display)",
"def get_prep_value(self, value):\n if value == \"\":\n # If Django tries to save an empty string, send the db None (NULL).\n return None\n else:\n # Otherwise,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle the modbus step. | async def async_step_modbus(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
if user_input is None:
return self.async_show_form(
step_id="modbus", data_schema=STEP_MODBUS_DATA_SCHEMA
)
errors = {}
try:
title, data ... | [
"def handle(_event, message, controller):\n del _event\n\n log = logging.getLogger('enarksh')\n\n # Compose a response message for the web interface.\n response = {'ret': 0,\n 'new_run': 0,\n 'message': 'OK'}\n\n try:\n NodeActi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialise a Phoneme object from its IPA symbol, using a dictionary of IPA symbols and features | def from_symbol(cls, symbol: str, phonemes: dict):
phoneme = phonemes[symbol]
name = phoneme['name']
features = cls.parse_features(phoneme['features'])
return cls(symbol, name, features) | [
"def __init__(self, grammar):\r\n for name, symbol in grammar.symbol2number.items():\r\n setattr(self, name, symbol)",
"def __init__(self, language, symbols):\r\n self.language = LanguageWrapper(language)\r\n self.symbolData = symbols\r\n for symbolData in self.symbolData:\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns True if this Phoneme object's similarity to another Phoneme object is equal to or above the given threshold of similarity | def partial_equals(self, other, threshold=0.7):
similarity_ratio = self.similarity_ratio(other)
if similarity_ratio >= threshold:
return True
else:
return False | [
"def find_similar (self, iterable, threshold=0.3):\n \n raise NotImplementedError",
"def fuzzy_compare(m1: Minutia, m2: Minutia):\n\n def diff_in_threshold(m1: Minutia, m2: Minutia):\n \"\"\" Helper function to determine if x and y are in threshold (distance)\n The attributes of... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an 'intersection phoneme' between this Phone object and another | def intersection(self, other):
if self == other:
return self
elif other:
if other.symbol in self.parent_phonemes:
return self
intersection = FeatureValueDict(set(self).intersection(set(other)))
# create new parents
new_parents... | [
"def __and__(self, other) -> 'GeoBox':\n return geobox_intersection_conservative([self, other])",
"def __and__(self, other):\r\n return self.intersection(other)",
"def __iand__(self, other):\r\n self.intersection_update(other)\r\n return self",
"def intersection(self, other):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Picks the closest Phoneme object (using the similarity ratio) from an iterable of Phoneme objects | def pick_closest(self, other_phonemes):
closest = max(other_phonemes, key=lambda phoneme: self.similarity_ratio(phoneme))
return closest | [
"def find_closest(the_id, good_ids):\n score_list = []\n for choice in good_ids:\n score_list.append(fuzz.ratio(the_id, choice))\n score_array = np.array(score_list)\n max_index = np.argmax(score_array)\n good_choice = good_ids[max_index]\n return good_choice",
"def closest_val(mylist,mat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a `list` of all the `System` objects in the cluster. Updates every time; no caching. | def get(self):
self.conn.connection._check_login()
response = self.conn.connection._do_get("{}/{}".format(self.conn.connection._api_url, "types/System/instances")).json()
all_system_objects = []
for system_object in response:
all_system_objects.append(self.conn.System.from_di... | [
"def get(self):\n self.conn.connection._check_login()\n response = self.conn.connection._do_get(\"{}/{}\".format(self.conn.connection._api_url, \"types/System/instances\")).json()\n all_system_objects = []\n for system_object in response:\n all_system_objects.append(SIO_System... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get ScaleIO SDC object by its name | def get_sdc_by_name(self, name):
for sdc in self.sdc:
if sdc.name == name:
return sdc
raise KeyError("SDC of that name not found") | [
"def get_sdc_object_by_pandas_name(pandas_name):\n if pandas_name in pandas_sdc_dict:\n return pandas_sdc_dict[pandas_name]\n else:\n return None # There is no match in Intel SDC to pandas_obj",
"def get_sdc_object(pandas_obj):\n if pandas_obj in pandas_sdc_dict:\n return pandas_sdc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get ScaleIO SDC object by its id | def get_sdc_by_id(self, id):
for sdc in self.sdc:
if sdc.id == id:
return sdc
raise KeyError("SDC with that ID not found") | [
"def get_sdc_object(pandas_obj):\n if pandas_obj in pandas_sdc_dict:\n return pandas_sdc_dict[pandas_obj]\n else:\n return None # There is no match in Intel SDC to pandas_obj",
"def get_sensor(id):\n query = {\"_id\": ObjectId(id)}\n if (sensor_db.count_documents(query)) < 1 :\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get ScaleIO SDC object by its ip | def get_sdc_by_ip(self, ip):
if self.conn.is_ip_addr(ip):
for sdc in self.sdc:
if sdc.sdcIp == ip:
return sdc
raise KeyError("SDS of that name not found")
else:
raise ValueError("Malformed IP address - get_sdc_by_ip()") | [
"def resolve_instance_by_ip(self, ip):\n return self.instances_ip[ip]",
"def get_connection(sid):\n for item in connections:\n if item['sid'] == sid:\n return item",
"def get_sdc_id(self, sdc_name=None, sdc_ip=None, sdc_id=None):\n\n id_ip_name = sdc_ip if sdc_ip else sdc_name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unregister SDC from MDM/SIO Cluster | def unregisterSdc(self, sdcObj):
# TODO:
# Add code that unmap volume if mapped
self.conn.connection._check_login()
response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/Sdc::", sdcObj.id, 'action/removeSdc'))
return response | [
"async def deregister(self, ctx: Context):\n if ctx.channel.name != self._monitor_channel:\n return\n author_id = str(ctx.message.author.id)\n if author_id not in self._working_discord_mc_mapping:\n fmt = '<@!{}> You not currently have a Minecraft account reigstered.'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes each windowed element by extracting the message body and its publish time into a tuple. | def process(self, element, publish_time=DoFn.TimestampParam):
yield (
element.decode("utf-8"),
datetime.utcfromtimestamp(float(publish_time)).strftime(
"%Y-%m-%d %H:%M:%S.%f"
),
) | [
"def __unpack__(self):\n for item in self.pubsub.listen():\n message = item.get('data')\n if item['type'] == 'message':\n yield message",
"def test_add_windowing_information(self):\n input_tweets, expected_output = static_data.get_static_windowing_information_dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse through the args and return the name of the DataTimes file, or None | def getDataTimes(args):
for v in args:
if '-DataTimes_L1' in v:
return v
return None | [
"def parse_datetime_from_filename (file_name_string) :\n \n datetime_to_return = None\n \n # check to see the type of the file, then let the appropriate code parse it\n if modis_guidebook.is_MODIS_file(file_name_string) :\n datetime_to_return = modis_guidebook.parse_datetime_from_filename(file... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse through the args and return the name of the SPQ file, or None | def getSPQ(args):
for v in args:
if fnmatch.fnmatch(v, 'FU_?_SPQ_????????_v??.csv'):
return v
return None | [
"def _get_input_fname(self):\n fnames = self._get_fnames_from_related_checks()\n if len(fnames) > 1:\n msg = (\"referencing more than one file per check system \"\n \"is not yet supported by this script.\")\n raise SSGError(msg)\n return fnames.pop() if f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
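The `getSPQ` row matches filenames against a glob pattern with `fnmatch`, where each `?` pins exactly one character; a small self-contained demonstration (the argument list is made up) is:

```python
import fnmatch

# Hypothetical argument list (illustration only)
args = ["--verbose", "FU_1_SPQ_20200101_v01.csv", "other.txt"]

# '?' matches exactly one character, fixing each field's width
matches = [a for a in args if fnmatch.fnmatch(a, "FU_?_SPQ_????????_v??.csv")]
print(matches)  # ['FU_1_SPQ_20200101_v01.csv']
```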
Determines which direction to turn in order to avoid the obstacle (currently turns in the direction with less obstacle). Once a direction has been established, checks whether the agent needs to continue turning or is safe. TODO: Should we force at least two turns...? | def determine_turn_direction(self, observations) -> bool:
# Check if the obstacle has been avoided
depth_map = (observations[0]["depth"]).squeeze() # TODO May need to trim edges, depending on FoV
# Squeeze edges to avoid the whole floor thing
depth_map = depth_map[:200,:]
... | [
"def _check_for_direction_change(self):\n # decide whether the enemies need to change direction\n turn_around = False\n if self._current_direction == Enemy.LEFT:\n left_most = self._find_leftmost()\n if left_most < self._bounds.left:\n turn_around = True\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get junctions within bbox of line and return both as numpy array | def np_array_bbox_points_line(line, tree_splitter):
# get junctions that contain within bbox line
pts_within_bbox = strtree_query_geoms(tree_splitter, line)
if len(pts_within_bbox) == 0:
# no point near bbox, nothing to insert, nothing to split
return None, None
# convert shapely lines... | [
"def insert_coords_in_line(line, tree_splitter):\n\n # get junctions that contain within bbox line\n pts_within_bbox = strtree_query_geoms(tree_splitter, line)\n\n # select junctions that are within tolerance of line\n tol_dist = 1e-8\n pts_on_line = list(\n itertools.compress(\n pt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert coordinates that are on the line, but where no vertices exist | def insert_coords_in_line(line, tree_splitter):
# get junctions that contain within bbox line
pts_within_bbox = strtree_query_geoms(tree_splitter, line)
# select junctions that are within tolerance of line
tol_dist = 1e-8
pts_on_line = list(
itertools.compress(
pts_within_bbox,... | [
"def _fill_done_line(cls, line):\n line[:] = [\n cls.TILE_EMPTY if tile == cls.TILE_UNKNOWN else tile\n for tile in line\n ]",
"def solveLine(self,line):\n\t\tmissing = filter(lambda x: x in line, self.board.vals)\n\t\tmissing = [val for val in self.board.vals if val not in lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split a LineString (numpy.array) with a Point or MultiPoint. This function is a replacement for the shapely.ops.split function, but faster. | def fast_split(line, splitter, is_ring):
# previously did convert geometries of coordinates from LineString and (Multi)Point
# to numpy arrays. This function now expect this as input to save time.
# line = np.array(line.coords)
# splitter = np.array([x for pt in splitter for x in pt.coords])
# loc... | [
"def np_array_bbox_points_line(line, tree_splitter):\n\n # get junctions that contain within bbox line\n pts_within_bbox = strtree_query_geoms(tree_splitter, line)\n\n if len(pts_within_bbox) == 0:\n # no point near bbox, nothing to insert, nothing to split\n return None, None\n # convert ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the signed area of a ring (polygon) | def signed_area(ring):
xs, ys = ring.T
signed_area = (xs * (np.roll(ys, -1) - np.roll(ys, +1))).sum() / 2
return signed_area | [
"def signed_area(self):\n flat = self.flatten()\n area = 0\n for s in flat.asSegments():\n area = area + (s.start.x * s.end.y) - (s.start.y * s.end.x)\n area = area / 2.0\n return area",
"def polygon_area(x, y):\r\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
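The shoelace variant in `signed_area` yields a positive value for counterclockwise rings and a negative one for clockwise rings, which is exactly what the `is_ccw` row below relies on; a quick check on a made-up unit square:

```python
import numpy as np

def signed_area(ring):
    xs, ys = ring.T
    return (xs * (np.roll(ys, -1) - np.roll(ys, +1))).sum() / 2

# Unit square, counterclockwise -> +1.0; reversed (clockwise) -> -1.0
ccw_ring = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
print(signed_area(ccw_ring))        # 1.0
print(signed_area(ccw_ring[::-1]))  # -1.0
```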
Provide information on whether a given ring is clockwise or counterclockwise. | def is_ccw(ring):
return signed_area(ring) >= 0.0 | [
"def isClockwise(ring, first, last):\n l = ring.perimeter\n (minx, maxx, _, _) = ring.bounds\n size = len(l)\n minimumX = maxx\n minimumDir = Polygonize._DOWN\n if first > last:\n last = last + size\n for i in range(first, last+1):\n indx = i - ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that compares two bounds with each other. Returns the combined (maximum) bounds. | def compare_bounds(b0, b1):
if len(b0) and len(b1):
bounds = (
min(b0[0], b1[0]),
min(b0[1], b1[1]),
max(b0[2], b1[2]),
max(b0[3], b1[3]),
)
elif len(b0) and not len(b1):
bounds = b0
elif not len(b0) and len(b1):
bounds = b1
... | [
"def get_max_bounded(*args, low, high):\n result = args[0]\n for num in args:\n if (result <= low or result >= high):\n result = num\n if (low < num < high and num > result):\n result = num\n if (result > low and result < high):\n return result\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
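`compare_bounds` merges two `(minx, miny, maxx, maxy)` tuples into their union, falling back to whichever one is non-empty; a compact sketch with a worked call (inputs are made up):

```python
def compare_bounds(b0, b1):
    # Union of two (minx, miny, maxx, maxy) bounding boxes;
    # an empty tuple stands in for "no bounds yet"
    if len(b0) and len(b1):
        return (min(b0[0], b1[0]), min(b0[1], b1[1]),
                max(b0[2], b1[2]), max(b0[3], b1[3]))
    return b0 if len(b0) else b1

print(compare_bounds((0, 0, 2, 2), (1, -1, 3, 1)))  # (0, -1, 3, 2)
```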
Function to create a numpy array from nested lists. The shape of the numpy array is the number of nested lists (rows) x the length of the longest nested list (columns). Rows that contain fewer values are filled with np.nan values. | def np_array_from_lists(nested_lists):
np_array = np.array(list(itertools.zip_longest(*nested_lists, fillvalue=np.nan))).T
return np_array | [
"def convert_nest_lists_to_np_array(\n nested_list: types.NestedTensorOrArray,\n) -> Union[Tuple[Any], np.ndarray]:\n if isinstance(nested_list, collections_abc.Mapping):\n ordered_items = [\n (k, convert_nest_lists_to_np_array(v)) for k, v in nested_list.items()\n ]\n if isinstance(nested_list,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
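`itertools.zip_longest` does the padding here: transposing the zipped columns back gives one row per input list, with `np.nan` filling the short rows. A tiny worked example (the input lists are made up):

```python
import itertools
import numpy as np

nested = [[1, 2, 3], [4, 5], [6]]
padded = np.array(list(itertools.zip_longest(*nested, fillvalue=np.nan))).T
print(padded)
# [[ 1.  2.  3.]
#  [ 4.  5. nan]
#  [ 6. nan nan]]
```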
Given a set of input linestrings, creates unique pair combinations. Each combination contains a pair of two linestrings whose envelopes overlap each other. Linestrings with non-overlapping envelopes are not returned as a combination. | def select_unique_combs(linestrings):
# create spatial index
with ignore_shapely2_warnings():
tree_idx = STRtree(linestrings)
# get index of linestrings intersecting each linestring
idx_match = get_matches(linestrings, tree_idx)
# make combinations of unique possibilities
combs = []
... | [
"def geometry_free_linear_combination(obs1: np.ndarray, obs2: np.ndarray) -> np.ndarray:\n # Coefficient of linear combination\n n = 1\n m = -1\n\n # Generate linear combination\n return n * obs1 + m * obs2",
"def pairwise_linestring_intersection(\n linestrings1: \"GeoSeries\", linestrings2: \"G... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that forces a certain winding order on the resulting output geometries. One can choose between `CCW_CW` and `CW_CCW`. `CW_CCW` implies clockwise for exterior polygons and counterclockwise for interior polygons (aka the geographical right-hand rule, where the right hand is in the area of interest as you walk the li... | def winding_order(geom, order="CW_CCW"):
# CW_CWW will orient the outer polygon clockwise and the inner polygon counter-
# clockwise to conform TopoJSON standard
if order == "CW_CCW":
geom = orient(geom, sign=-1.0)
elif order == "CCW_CW":
geom = orient(geom, sign=1.0)
else:
... | [
"def generate_wind():\n# Taken by converting UTM Zone 11 coordinates on\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# These values specific to files called yosemite_landscape_12-03-2019_0900_120m\n west_lon = -120.006255\n east_lon = -119.4736\n south_lat = 37.464649\n nort... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
prettify TopoJSON Format output for readability. | def prettify(topojson_object):
return pprint.pprint(topojson_object) | [
"def pretty_print(self): \n data = json.dumps(self.data, sort_keys=True, indent=4 * ' ')\n print(data)",
"def pretty_print_json(data):\n return json.dumps(data,ensure_ascii=False,sort_keys=True,indent=4)",
"def prettyJson(self) -> str:\n assert self._jdict is not None or self._jprogres... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to apply delta encoding to linestrings. | def delta_encoding(linestrings):
for idx, ls in enumerate(linestrings):
if hasattr(ls, "coords"):
ls = np.array(ls.coords).astype(np.int64)
else:
ls = np.array(ls).astype(np.int64)
ls_p1 = copy.copy(ls[0])
ls -= np.roll(ls, 1, axis=0)
ls[0] = ls_p1
... | [
"def get_data_encoding():",
"def _term_and_encode(self, seq):\n if isinstance(seq, str):\n if seq.endswith(self.CR_LF_STR):\n return seq.encode()\n return \"\".join((seq, self.CR_LF_STR)).encode()\n if seq.endswith(self.CR_LF_BYTES):\n return seq\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
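The core of `delta_encoding` is `ls -= np.roll(ls, 1, axis=0)` with the first point restored afterwards; decoding is then a cumulative sum. A round-trip sketch on made-up integer coordinates:

```python
import numpy as np

coords = np.array([[10, 10], [12, 11], [15, 13]], dtype=np.int64)

# Encode: keep the first point, store successive differences
delta = coords.copy()
first = delta[0].copy()
delta -= np.roll(delta, 1, axis=0)
delta[0] = first
print(delta)
# [[10 10]
#  [ 2  1]
#  [ 3  2]]

# Decode: a cumulative sum recovers the original coordinates
print(np.array_equal(np.cumsum(delta, axis=0), coords))  # True
```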
Function solely for detecting and recording duplicate LineStrings. The function converts and sorts the coordinates of each linestring and hashes them. Using the hashes it can quickly detect duplicates and return the indices. | def find_duplicates(segments_list, type="array"):
# get hash of sorted linestring coordinates
hash_segments = []
if type != "array":
segments_list = [
np.array(list(linestring.coords)) for linestring in segments_list
]
for coordinates in segments_list:
# If start an... | [
"def find_line_with_hash(lines, hex_id):\n for line_id, line in enumerate(lines):\n if hex_id in line: \n return (line_id, line)\n\n return None",
"def select_unique_combs(linestrings):\n\n # create spatial index\n with ignore_shapely2_warnings():\n tree_idx = STRtree(linestri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function replaces values elementwise in a numpy array. It's quick and avoids an np.where loop (which is slow). The result is a new array, not in-place. | def map_values(arr, search_vals, replace_vals):
N = max(arr.max(), max(search_vals)) + 1
maparr = np.empty(N, dtype=np.int64)
maparr[arr] = arr
maparr[search_vals] = replace_vals
arr_upd = maparr[arr]
return arr_upd | [
"def _replace_nans(self, arr):\n arr = arr.copy()\n idxs = np.where(pd.isnull(arr))\n # note: two-line if condition below. indentation is confusing.\n if (isinstance(arr, pd.Series) or\n (isinstance(arr, np.ndarray) and len(arr.shape) == 1)):\n arr = np.asarray(arr)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
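`map_values` builds a lookup table as long as the largest value, so the whole replacement is a single fancy-indexing pass instead of one `np.where` per value. A self-contained sketch of the same trick (using an `np.arange` identity table as a slight variant, with made-up inputs):

```python
import numpy as np

arr = np.array([0, 2, 2, 5, 7])
search_vals = np.array([2, 7])
replace_vals = np.array([20, 70])

# Lookup table over every possible value: identity by default,
# overridden at the searched positions
N = max(arr.max(), search_vals.max()) + 1
maparr = np.arange(N, dtype=np.int64)
maparr[search_vals] = replace_vals
print(maparr[arr])  # [ 0 20 20  5 70]
```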
Remove leading `|` and spaces, in order to get an alphabetic sort of the first column | def table_sort(s):
return s.lstrip(" |") | [
"def sortCaseInsensitive():\n pass",
"def task2(text):\n return ' '.join(sorted(text.split(' ')))",
"def sort(self,line):\r\n\t\tcommands = line.split(' ')\r\n\t\tcommands.sort(cmp)\r\n\t\t\r\n\t\tline = \"\"\r\n\t\tfor command in commands:\r\n\t\t\tline += command + \" \"\r\n\t\t\r\n\t\treturn line[:-1]"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the local name associated with uri | def _local_name(self, uri: URIRef) -> str:
str_uri = str(uri)
if "://" not in str_uri:
return str_uri.rsplit(':', 1)[1] if ':' in str_uri else str_uri
prefix_len = 0
for _, prefix in self.source_graph.namespaces():
if str_uri.startswith(prefix):
pr... | [
"def get_localname(name: Any, localname=None) -> str:\n if localname:\n return safe(localname)\n\n # Remote file\n if try_except_pass((IOError, ValueError), urlopen, name):\n name = get_remotename(name)\n\n # Local file\n elif os.path.exists(name):\n name = get_genomename(name)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process a SubClassOf expression | def proc_subclass_of(self, sco: SubClassOf, ind: Individual) -> None:
self.proc_class_expression(sco.superClassExpression, ind) | [
"def subclasses(nodes, c):\n con = concrete(nodes)\n return filter (lambda node: node.name != c and node.is_a (c), con)",
"def test_subclass_of_base(self):\n self.assertTrue(issubclass(Square, Rectangle))",
"def has_subclass(entity, cfg):\n query = \"select (COUNT(DISTINCT ?s) AS ?count) where {... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process an equivalent classes expression | def proc_equivalent_classes(self, ec: EquivalentClasses, ind: Individual) -> None:
for ce in ec.classExpressions:
if isinstance(ce, Class):
self.realize_against(ind, ce)
else:
self.proc_class_expression(ce, ind) | [
"def process(self):\n\n self.process_classes(Steps.FLATTEN)\n self.filter_classes()\n self.process_classes(Steps.SANITIZE)\n self.process_classes(Steps.RESOLVE)\n self.process_classes(Steps.FINALIZE)\n self.designate_classes()",
"def visitClass(self, testClass):",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Realize subject, predicate, code as an Abox against the input ontology file | def realize(code: Union[str, URIRef], input_ontology_file: Union[str, TextIOWrapper], output_ontology_file: str,
subject: Union[str, URIRef] = EX.Joe, predicate: Union[str, URIRef_or_STR] = EX.hasDiagnosis,
skip_role_groups: bool = False, single_individuals: bool = False) -> bool:
input_onto... | [
"def main():\n\tdata = sys.argv[1:]\n\tmeaning = data[1].upper()\n\t\n\txml = ['<aiml>']\n\t\n\txml.append('<topic name=\"TRAINING%s\">' % data[0].upper())\n\n\txml.append('<category>')\n\txml.append('<pattern>%s MEANS *</pattern>' % meaning)\n\txml.append('<template>')\n\txml.append('<star/>')\n\txml.append('</tem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test `dd()` of `del_db()` in `database.py`. | def test_del_db(self):
function_name = sys._getframe().f_code.co_name
db_name = "{}_{}".format(function_name, "db")
db_name_illegal_by_rdb = "{}_{}".format(
db_name,
self.ILLEGAL_BY_RDB
)
db_name_illegal_by_this_program = "{}_{}".format(
... | [
"def test_delete_database(self):\n with _mocked_session('delete', 204):\n cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')\n self.assertTrue(cli.delete_database('old_db'))",
"def test_db_remove():\n with patch.object(mysql, \"db_exists\", MagicMock(return_value=True... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render border around cmdhelp box | def render_cmdhelp_border(term: Terminal) -> str:
return render_border(
start_x=const.CMDHELP_X,
start_y=const.CMDHELP_Y,
height=const.CMDHELP_HEIGHT,
width=const.CMDHELP_WIDTH,
title=" Help ",
term=term,
) | [
"def render_cmdlist_border(term: Terminal) -> str:\n return render_border(\n start_x=const.CMDLIST_X,\n start_y=const.CMDLIST_Y,\n height=const.CMDLIST_HEIGHT,\n width=const.CMDLIST_WIDTH,\n title=\" Plan \",\n term=term,\n )",
"def draw_borders(self) -> None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render border around cmdlist box | def render_cmdlist_border(term: Terminal) -> str:
return render_border(
start_x=const.CMDLIST_X,
start_y=const.CMDLIST_Y,
height=const.CMDLIST_HEIGHT,
width=const.CMDLIST_WIDTH,
title=" Plan ",
term=term,
) | [
"def render_cmdhelp_border(term: Terminal) -> str:\n return render_border(\n start_x=const.CMDHELP_X,\n start_y=const.CMDHELP_Y,\n height=const.CMDHELP_HEIGHT,\n width=const.CMDHELP_WIDTH,\n title=\" Help \",\n term=term,\n )",
"def draw_borders(self) -> None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render border around userinput box | def render_user_input_border(term: Terminal) -> str:
return render_border(
start_x=const.INPUT_X,
start_y=const.INPUT_Y,
height=const.INPUT_HEIGHT,
width=const.INPUT_WIDTH,
title=" Input ",
term=term,
) | [
"def _render_field_border(self):\n with self.game_obj.attrmng(curses.color_pair(1)):\n self.game_obj.addstr(self.field['y'][0], self.field['x'][0], self.border['jtl'])\n self.game_obj.addstr(self.field['y'][1], self.field['x'][0], self.border['jbl'])\n self.game_obj.addstr(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render border around map | def render_map_border(term: Terminal, title: str) -> str:
cfg = BorderConfig(left="├", right="┤", top="┬", bottom="┴")
return render_border(
start_x=const.MAP_X,
start_y=const.MAP_Y,
height=const.MAP_HEIGHT,
width=const.MAP_WIDTH,
title=f" {title} ",
term=term,
... | [
"def _render_field_border(self):\n with self.game_obj.attrmng(curses.color_pair(1)):\n self.game_obj.addstr(self.field['y'][0], self.field['x'][0], self.border['jtl'])\n self.game_obj.addstr(self.field['y'][1], self.field['x'][0], self.border['jbl'])\n self.game_obj.addstr(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clear user input area | def clear_user_input_window(term: Terminal) -> str:
return clear_window(
pos_x=const.INPUT_X + 1,
pos_y=const.INPUT_Y + 1,
width=const.INPUT_WIDTH,
height=const.INPUT_HEIGHT,
term=term,
) | [
"def promptClear(self):\n\n self.string = \"\"\n self.position = 0\n self.view = 0",
"def clear_text():\r\n input_website.delete(0, 'end')\r\n input_pass.delete(0, 'end')",
"def clearText(self):\n self.txtIP.enterText(\"\")",
"def clear_input_field(self) -> None:\n\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply function for data frames. Returns a modified data frame where values in the specified columns have been modified using the given apply function. The apply function can be an apply factory. In this case, a separate instance of the function is generated and applied to each column. | def apply(df, columns, func):
return Apply(columns=columns, func=func).transform(df) | [
"def df_apply(df, funcs):\n if not callable(funcs) and not isinstance(funcs, dict):\n raise ValueError('Expected {} as argument of {}, got={}'.format(\n ', '.join([a.__name__ for a in [callable, dict]]), __name__, type(funcs)\n ))\n\n if callable(funcs):\n return df.apply(funcs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the list of columns and the apply function. | def __init__(self, columns, func):
# Ensure that columns is a list.
self.columns = as_list(columns)
# Ensure that the function is a value function.
if not isinstance(func, ValueFunction):
# Instantiate the function if a class object is given
if isinstance(func, ty... | [
"def __init__(self, columns: Columns, func: UpdateFunction):\n # Ensure that columns is a list\n self.columns = columns\n self.func = get_update_function(func=func, columns=self.columns)",
"def __init__(self, data, columns:list):\n self.data = data.loc[:, columns]\n self.columns... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
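A usage sketch for the two `Apply` rows above, assuming a pandas DataFrame. `apply` and the per-column factory behaviour described in `__init__` come from the snippets themselves; the frame and the lambda below are illustrative:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})

# A plain callable is wrapped into a ValueFunction by Apply.__init__;
# passing a ValueFunction *class* instead would instantiate a separate
# apply-factory instance per column.
result = apply(df, columns=["a", "b"], func=lambda v: v * 2)
print(result)
```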
Sets the video_codec of this AssetNonStandardInputReasons. | def video_codec(self, video_codec):
self._video_codec = video_codec | [
"def video(self, value):\n self._video = value",
"def set_codec(self, type_key, codec):\n self._codec_type_maps[type_key] = codec",
"def unexpected_media_file_parameters(self, unexpected_media_file_parameters):\n allowed_values = [\"non-standard\"] # noqa: E501\n if self.local_vars_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the audio_codec of this AssetNonStandardInputReasons. | def audio_codec(self, audio_codec):
self._audio_codec = audio_codec | [
"def audio(self, audio):\n # type: (string_types) -> None\n\n if audio is not None:\n if not isinstance(audio, string_types):\n raise TypeError(\"Invalid type for `audio`, type has to be `string_types`\")\n\n self._audio = audio",
"def setAudio(self, audio, mode):\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the video_gop_size of this AssetNonStandardInputReasons. | def video_gop_size(self, video_gop_size):
allowed_values = ["high"] # noqa: E501
if self.local_vars_configuration.client_side_validation and video_gop_size not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `video_gop_size` ({0}), must be one of {1}" ... | [
"def avc_max_gop_size_test(self, avc_max_gop_size_test):\n\n self._avc_max_gop_size_test = avc_max_gop_size_test",
"def mpeg_max_gop_test(self, mpeg_max_gop_test):\n\n self._mpeg_max_gop_test = mpeg_max_gop_test",
"def enableStopSize(self, size=0):\n\n if type(size) != int:\n pri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the video_frame_rate of this AssetNonStandardInputReasons. | def video_frame_rate(self, video_frame_rate):
self._video_frame_rate = video_frame_rate | [
"def force_frame_rate_attribute(self, force_frame_rate_attribute):\n # type: (bool) -> None\n\n if force_frame_rate_attribute is not None:\n if not isinstance(force_frame_rate_attribute, bool):\n raise TypeError(\"Invalid type for `force_frame_rate_attribute`, type has to be ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the video_resolution of this AssetNonStandardInputReasons. | def video_resolution(self, video_resolution):
self._video_resolution = video_resolution | [
"def video_resolution(self, res):\n self._video_resolution = tuple(res)\n self.process_image()\n self.clear_segments()",
"def yresolution(self, yresolution):\n if yresolution is None:\n raise ValueError(\"Invalid value for `yresolution`, must not be `None`\")\n self._... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the video_bitrate of this AssetNonStandardInputReasons. | def video_bitrate(self, video_bitrate):
allowed_values = ["high"] # noqa: E501
if self.local_vars_configuration.client_side_validation and video_bitrate not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `video_bitrate` ({0}), must be one of {1}" # no... | [
"def video_frame_rate(self, video_frame_rate):\n\n self._video_frame_rate = video_frame_rate",
"def setBitrate(self, bitrate):\n try:\n # bypassed by request from Ivan\n if (pu.pxpconfig.IgnoreVideoSettings()):\n dbg.prn(dbg.TDK,\"td -- SetBitrate BYBASSED\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the pixel_aspect_ratio of this AssetNonStandardInputReasons. | def pixel_aspect_ratio(self, pixel_aspect_ratio):
self._pixel_aspect_ratio = pixel_aspect_ratio | [
"def aspect_ratio(self, aspect_ratio):\n # type: (string_types) -> None\n\n if aspect_ratio is not None:\n if not isinstance(aspect_ratio, string_types):\n raise TypeError(\"Invalid type for `aspect_ratio`, type has to be `string_types`\")\n\n self._aspect_ratio = aspe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the video_edit_list of this AssetNonStandardInputReasons. | def video_edit_list(self, video_edit_list):
allowed_values = ["non-standard"] # noqa: E501
if self.local_vars_configuration.client_side_validation and video_edit_list not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `video_edit_list` ({0}), must be o... | [
"def audio_edit_list(self, audio_edit_list):\n allowed_values = [\"non-standard\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and audio_edit_list not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `audio_edit_list` ({0}... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the audio_edit_list of this AssetNonStandardInputReasons. | def audio_edit_list(self, audio_edit_list):
allowed_values = ["non-standard"] # noqa: E501
if self.local_vars_configuration.client_side_validation and audio_edit_list not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `audio_edit_list` ({0}), must be o... | [
"def set_allowed_list(self, assets, on_fail=None):\n if not listlike(assets):\n assets = [assets]\n control = TCWhiteList(assets, on_fail)\n self.register_trading_controls(control)",
"def _set_style_list(self):\n # list of style choices\n for idx in range(len(STYLE)):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the unexpected_media_file_parameters of this AssetNonStandardInputReasons. | def unexpected_media_file_parameters(self, unexpected_media_file_parameters):
allowed_values = ["non-standard"] # noqa: E501
if self.local_vars_configuration.client_side_validation and unexpected_media_file_parameters not in allowed_values: # noqa: E501
raise ValueError(
"I... | [
"def setErrorMaterial(self, material):\n self._errorMaterial = material",
"def rejected_for_ions(self, rejected_for_ions):\n\n self._rejected_for_ions = rejected_for_ions",
"def set_unknown_specials_spec(self, specialsspec):\n if self.frozen:\n raise RuntimeError(\"You attempted ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the unsupported_pixel_format of this AssetNonStandardInputReasons. | def unsupported_pixel_format(self, unsupported_pixel_format):
self._unsupported_pixel_format = unsupported_pixel_format | [
"def pixel_format(self, pixel_format):\n if pixel_format is not None:\n allowed_values = [\"Format1bppIndexed\", \"Format4bppIndexed\", \"Format8bppIndexed\", \"Format24bppRgb\", \"Format32bppArgb\"] # noqa: E501\n if pixel_format.isdigit():\n int_pixel_format = int(pixe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
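The AssetNonStandardInputReasons rows from `video_codec` through `unsupported_pixel_format` are all instances of two swagger-codegen setter shapes: plain pass-through for free-form fields, and an allowed-values check gated on `client_side_validation` for enum fields. A condensed sketch of both, with the property scaffolding (which the dump omits) assumed:

```python
class AssetNonStandardInputReasons:
    def __init__(self, local_vars_configuration):
        self.local_vars_configuration = local_vars_configuration
        self._video_codec = None
        self._video_bitrate = None

    @property
    def video_codec(self):
        return self._video_codec

    @video_codec.setter
    def video_codec(self, video_codec):
        # Free-form field: plain pass-through assignment.
        self._video_codec = video_codec

    @property
    def video_bitrate(self):
        return self._video_bitrate

    @video_bitrate.setter
    def video_bitrate(self, video_bitrate):
        # Enum field: validate against the allowed values when
        # client-side validation is enabled.
        allowed_values = ["high"]
        if (self.local_vars_configuration.client_side_validation
                and video_bitrate not in allowed_values):
            raise ValueError(
                "Invalid value for `video_bitrate` ({0}), must be one of {1}"
                .format(video_bitrate, allowed_values)
            )
        self._video_bitrate = video_bitrate
```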
Returns the FTP credentials for a host as a tuple containing 3 items in the order ('host', 'user', 'pass'). This assumes a .netrc file exists in the user's home directory and that the host has an entry; otherwise you'll be prompted for your credentials. | def get_credentials(host):
try:
infos = netrc().authenticators(host)
# netrc returns (user,None,pass) - fix to match input for FTP
credentials = (host,infos[0],infos[2])
except NetrcParseError:
print('Sorry no netrc for this host, please enter in your credentials')
crede... | [
"def _get_netrc_credentials(self, filename=None):\n from netrc import NetrcParseError, netrc as NetrcFile\n source = None\n try:\n source = NetrcFile(filename)\n except NetrcParseError:\n return (None, None)\n\n hosts = [\n 'didel.script.univ-paris... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
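For the netrc row, the point worth noting is the field reordering: `netrc().authenticators(host)` returns `(login, account, password)`, which `get_credentials` repacks as `(host, user, pass)` to match `ftplib.FTP`'s argument order. A standalone sketch; the host name and .netrc entry are hypothetical:

```python
from ftplib import FTP
from netrc import netrc

# Assumed ~/.netrc entry (hypothetical):
#   machine ftp.example.com login alice password s3cret
auth = netrc().authenticators("ftp.example.com")  # None if host is missing
login, _account, password = auth

with FTP("ftp.example.com") as ftp:
    ftp.login(user=login, passwd=password)
    print(ftp.nlst())
```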
Test Foreach.handle_message() when the message is not targeted to it. | def test_handle_message_wrong_target(self):
msg = Message(name='start', target='fake-id_10', origin='fake-id')
self.root.state = 'active'
self.foreach.state = 'active'
result = self.root.handle_message(self.ch, msg)
self.assertEqual(result, 'ignored') | [
"def test_handle_message_completed_from_non_last_child(self):\n\n msg = Message(name='completed', target='fake-id_0',\n origin='fake-id_0_0')\n self.root.state = 'active'\n self.foreach.state = 'active'\n self.foreach.context._props[\"inst:iteration\"] = 1\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Foreach.handle_message() with start msg and empty select. | def test_handle_message_start_with_empty_select(self):
msg = Message(name='start', target='fake-id_0', origin='fake-id')
self.root.state = 'active'
self.root.context.set('prop2', {"subkey": []})
with patch('bureaucrat.flowexpression.Message') as MockMessage:
newmsg = Message... | [
"def test_handle_message_wrong_target(self):\n\n msg = Message(name='start', target='fake-id_10', origin='fake-id')\n self.root.state = 'active'\n self.foreach.state = 'active'\n result = self.root.handle_message(self.ch, msg)\n self.assertEqual(result, 'ignored')",
"def test_ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Foreach.handle_message() with start msg. | def test_handle_message_start(self):
msg = Message(name='start', target='fake-id_0', origin='fake-id')
self.root.state = 'active'
with patch('bureaucrat.flowexpression.Message') as MockMessage:
newmsg = Message(name='start', target='fake-id_0_0',
origin=... | [
"def test_handle_message_start(self):\n\n msg = Message(name='start', target='fake-id', origin='')\n newmsg = Message(name='start', target='fake-id_0', origin='fake-id')\n self.fexpr.state = 'ready'\n with patch('bureaucrat.flowexpression.Message') as MockMessage:\n MockMessag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Foreach.handle_message() with completed msg from a non-last child. | def test_handle_message_completed_from_non_last_child(self):
msg = Message(name='completed', target='fake-id_0',
origin='fake-id_0_0')
self.root.state = 'active'
self.foreach.state = 'active'
self.foreach.context._props["inst:iteration"] = 1
self.foreach.co... | [
"def test_handle_message_completed_from_last_child(self):\n\n msg = Message(name='completed', target='fake-id_0',\n origin='fake-id_0_1')\n self.root.state = 'active'\n self.foreach.state = 'active'\n self.foreach.children[0].state = 'completed'\n self.foreach... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Foreach.handle_message() with completed msg from the last child. | def test_handle_message_completed_from_last_child(self):
msg = Message(name='completed', target='fake-id_0',
origin='fake-id_0_1')
self.root.state = 'active'
self.foreach.state = 'active'
self.foreach.children[0].state = 'completed'
self.foreach.context._pr... | [
"def test_handle_message_completed_from_non_last_child(self):\n\n msg = Message(name='completed', target='fake-id_0',\n origin='fake-id_0_0')\n self.root.state = 'active'\n self.foreach.state = 'active'\n self.foreach.context._props[\"inst:iteration\"] = 1\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
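The five Foreach test rows share one routing contract: `handle_message` returns 'ignored' for messages addressed to another expression, and otherwise acts, emitting follow-up messages to hierarchical child ids such as 'fake-id_0_0'. A self-contained toy version of that contract for illustration only — the bureaucrat classes are mocked out, and the 'consumed' return value is a guess, since the dump truncates the non-ignored assertions:

```python
import unittest
from unittest.mock import MagicMock


class ToyExpression:
    """Stand-in for bureaucrat's flow expressions (illustrative only)."""

    def __init__(self, id_):
        self.id = id_
        self.state = "ready"

    def handle_message(self, channel, msg):
        if msg["target"] != self.id:
            return "ignored"
        self.state = "active"
        # Fan out a 'start' message to the first child, mirroring the
        # 'fake-id_0' -> 'fake-id_0_0' pattern in the rows above.
        channel.send({"name": "start", "target": self.id + "_0",
                      "origin": self.id})
        return "consumed"


class TestRouting(unittest.TestCase):
    def test_wrong_target_is_ignored(self):
        expr = ToyExpression("fake-id_0")
        ch = MagicMock()
        result = expr.handle_message(ch, {"name": "start",
                                          "target": "fake-id_10"})
        self.assertEqual(result, "ignored")
        ch.send.assert_not_called()

    def test_matching_target_fans_out(self):
        expr = ToyExpression("fake-id_0")
        ch = MagicMock()
        result = expr.handle_message(ch, {"name": "start",
                                          "target": "fake-id_0"})
        self.assertEqual(result, "consumed")
        ch.send.assert_called_once()


if __name__ == "__main__":
    unittest.main()
```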