query (string) | document (string) | negatives (list) | metadata (dict) |
|---|---|---|---|
Delay scan without the daq. | def empty_delay_scan(self, start, end, sweep_time, record=None,
use_l3t=False, duration=None):
self.cleanup_RE()
#daq.configure(events=None, duration=None, record=record,
# use_l3t=use_l3t, controls=[lxt_fast])
try:
RE(delay_scan([], lxt_... | [
"def wait_for_scan(self):\n while self.any_scans(bin(int(self.get_cond()))):\n time.sleep(1)",
"def flush_scan(self):\n self.scan = [None]*180",
"def _on_scan_complete(self):\n self._results.sync_q.put(None)",
"def scan_stop(self):\n self.__ble.gap_scan(None)",
"def man_scan(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Map VDJdb data from multiple studies to one tcrdist2-formatted pd.DataFrame | def multistudy_vdjdb_to_tcrdist2(pd_df):
# 2 complex.id which links paired sequences is Reference (study specific)
studies = pd_df['Reference'].unique()
# break full df into sub dfs: one per study
dfs_split_by_study = {study: pd.DataFrame for study in studies}
for study in list(dfs_split_by_study.k... | [
"def dbs2data(dbs, data, fields):\n if isinstance(fields, str):\n fields = [fields]\n ufields = [\"dic_cell_id\"]\n # numfields = [\"dic\", \"salinity_v6\", \"alkalinity\", \"emf0\", \"pH_vindta_free_lab\", \"pH_vindta_temperature\"]\n for field in fields:\n data[field] = np.nan\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
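The row above is truncated mid-loop. As a minimal, hedged sketch of the split-by-study step it describes (the function name `split_by_study` is illustrative, not part of the dataset), pandas' `groupby` does the same job without pre-building a placeholder dict:

```python
import pandas as pd

def split_by_study(pd_df: pd.DataFrame) -> dict:
    # One sub-DataFrame per study, keyed by the 'Reference' column.
    return {study: sub_df.copy() for study, sub_df in pd_df.groupby("Reference")}
```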
Take a list, put an "and" between the two last items, and add a period | def addingOxford(items):
    # Insert "and" before the last item, join the rest with commas
    # (Oxford style), and end the sentence with a period.
    items.insert(-1, 'and')
    sentence = ''
    for i, word in enumerate(items):
        if word == 'and' or i == len(items) - 1:
            sentence = sentence + word + ' '
        else:
            sentence = sentence + word + ', '
    return sentence.rstrip() + '.' | [
"def format_list(my_list):\n my_list[-1] = \"and \" + my_list[-1] #add the and requirement to appear before the last item\n print(my_list, type(my_list))\n new_even_list = my_list[1::2]\n print(new_even_list, type(new_even_list))\n formated_string = \", \".join(new_even_list)\n print(formated_stri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TemplateManager constructor. Takes a directory name and searches that directory for photo templates. | def __init__(self, dirname):
self.templateDir = dirname
print("Template directory: " + self.templateDir)
self.templateList = list()
dirList = os.listdir(dirname)
for dir in dirList:
try:
reader = TemplateReader(self.templateDir + os.path.sep + dir, ... | [
"def from_directory(cls, path, **kwargs):\n registry = cls()\n for template_name in os.listdir(path):\n if template_name.startswith('.'):\n continue\n template_path = os.path.join(path, template_name)\n registry.add(template_name, Template.from_directory... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the number of templates in the list | def getCount(self):
return len(self.templateList) | [
"def numTemplates(self) -> retval:\n ...",
"def test_portals_id_templates_count_get(self):\n pass",
"def test_portals_id_template_folders_count_get(self):\n pass",
"def template_cycles(self) -> int:\n tokens = self._template_pattern.findall(self.structure)\n return sum((int(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the TemplateReader object at the specified index. | def getTemplateAtIndex(self, index):
return self.templateList[index] | [
"def getObject(self, index: long) -> object:\n ...",
"def get_instance(self, index):\n return self.instances[index]",
"def __getitem__(self, index):\n return self._renderers[index]",
"def row(self, index):\n return self.rows[index]",
"def __getitem__(self, index=0):\n if i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validates the file against a specific XML Schema Definition document. | def __validateFile(self):
xml_schema_doc = etree.parse(TemplateReader.TemplateXSD)
xmlSchema = etree.XMLSchema(xml_schema_doc)
return xmlSchema.validate(self.template_xml) | [
"def validate(xml_doc, xsd_file_name):\n\n with open(xsd_file_name, \"r\") as xsdf:\n xml_schema_data = xsdf.read()\n xml_schema_doc = etree.XML(xml_schema_data)\n xml_schema = etree.XMLSchema(xml_schema_doc)\n xml_doc = etree.XML(xml_doc)\n try:\n xml_schema.assertValid(xml_doc)\n e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the template.xml file and stores the data in the object | def __parseFile(self):
root = self.template_xml.getroot()
self.templateName = root.find(self.NS+"name").text
descriptionElem = root.find(self.NS+"description")
if(descriptionElem is not None):
self.description = descriptionElem.text
auth... | [
"def parse_from_template(template_name):\n thisdir = os.path.split(__file__)[0]\n filename = os.path.join(\n thisdir, '..', 'templates', '%s.xml' % template_name\n )\n with open(filename, 'rb') as f:\n xml = f.read()\n return parse_xml(xml)",
"def unpack_template(self):\n\n wit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the canvas object and its contents | def __parseCanvas(self, canvas):
backgroundColorAttr = canvas.get("backgroundColor")
if(backgroundColorAttr is not None):
self.backgroundColor = backgroundColorAttr
self.height = int(canvas.get("height"))
self.width = int(canvas.get("width"))
backgroundP... | [
"def draw(self, canvas):\n pass",
"def render(self):\n\n # Create the canvas as a nested list.\n canvas = []\n for x in range(0, 10):\n canvas.append([None for i in range(0,10)])\n\n # Update canvas list with the canvas object's shapes.\n for shape in self.shap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the photo list object and its contents | def __parsePhotoList(self, photoList):
self.photoList = list()
for photoSpec in photoList.getchildren():
height = int(photoSpec.get("height"))
width = int(photoSpec.get("width"))
x = int(photoSpec.get("x"))
y = int(photoSpec.get("y"))
if(photoS... | [
"def photos(self):\n return json.loads(self['photos']) or []",
"def read_gallery_list(self):\n pass",
"def parseImglist(self, imagelist):\r\n assert(os.path.exists(imagelist))\r\n #print(\"imagelist: \", imagelist)\r\n with open(imagelist, 'r') as f:\r\n lines = f.r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the path to the preview image file. | def getTemplatePreviewPath(self):
if(self.previewImageFilename != None):
return self.TemplateDir + os.path.sep + self.previewImageFilename
else:
return None | [
"def get_preview_url(self):\n\n\t\treturn self.__preview_url",
"def get_preview_file(self):\n import warnings\n warnings.warn(\"Documents.get_preview_file is deprecated. \"\n \"Use GetPreviewFile instead.\",\n DeprecationWarning,\n stack... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the size of the largest image. Currently assumes that all images will be the same aspect ratio | def getMaxImageSize(self):
maxSize = (0,0)
for spec in self.photoList:
if(spec['width'] > maxSize[0]):
maxSize = (spec['width'], spec['height'])
return maxSize | [
"def _geometry_from_largest(img, size):\n w, h = geometry(img)\n if w > h:\n return size, _proportional_dim(w, size, h)\n else:\n return _proportional_dim(h, size, w), size",
"def get_imgsize(self):\n return self.img_pil.size",
"def get_bounding_box_size(images):\r\n height = ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a list of images and processes them according to the contained template. Returns a PIL Image object | def processImages(self, imageList):
# Create the initial canvas
canvasSize = (self.template.width, self.template.height)
if(self.template.backgroundColor != None):
canvasColor = self.hex_to_rgb(self.template.backgroundColor)
else:
canvasColor = (0,0,0,0)
... | [
"def load_multiple_images(self, filepath_list):\n self.image = Image.from_multiples(filepath_list)",
"def make_image_list(image_dir):",
"def par_template_match(self, list_image_file, template_file, draw_images=False, image_num=None,\n normalised_coords=True, threshold=0.99):\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the keras layer the given op is generated from. Returns None if op does not belong to any layer. Trace back from current scope to parent scope recursively until it reaches the outermost scope. | def belongs_to_keras_layer(op, scope_to_layer):
if not scope_to_layer:
return None
layer = None
scope = op.name
while True:
if scope in scope_to_layer:
layer = scope_to_layer[scope][0]
break
parent_scope = _parent_scope(scope)
        # Already at the outermost scope.
if parent_scope == ... | [
"def get_layer(keras_tensor):\n layer = keras_tensor._keras_history[0]\n return layer",
"def _get_layer(self, name: str) -> layer.Layer:\n self._validate_layer_name(name)\n return self.layers[name]",
"def get_top_layer(self):\n return self._layers[-1]",
"def get_layer(self, name=Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
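The document above calls a helper `_parent_scope` that the row does not show. A minimal sketch of one plausible implementation, assuming TensorFlow-style "/"-separated scope names (the while loop then stops once the parent scope equals the scope itself):

```python
def _parent_scope(scope: str) -> str:
    # 'model/dense_1/MatMul' -> 'model/dense_1'; a top-level name maps to itself.
    head, sep, _ = scope.rpartition("/")
    return head if sep else scope
```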
Get layer's inbound nodes. The config of a layer does not include connectivity information, nor the layer class name. These are handled by keras.Model. So we extract them from model's config and associate them to the corresponding layer. | def get_layer_inbound_nodes(layer_parent_pairs):
layer_inbound_nodes = {}
model = None
# Get a keras model which is a top-level layer.
for layer, parent_layer in layer_parent_pairs:
if parent_layer is None:
model = layer
break
if getattr(model, '_is_graph_network', None):
# Only graph net... | [
"def getInputNetworks(self):\n return self.inputFrames",
"def getInputNodes(self, nodeName):\n node = self.getNode(nodeName)\n inNodes = []\n for inp in node.input:\n if len([nde for nde in self.graph.node if inp in nde.output]):\n inNodes += [inp]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Visualise satellite data with pytroll. From a set of files containing satellite data, visualise channels and composites for the given regions/areas, possibly adding coastlines. | def show(
files,
channels,
composites,
regions,
d_out,
fn_out,
reader=None,
path_to_coastlines=None,
label="",
show_only_coastlines=False):
L = set()
sc = satpy.Scene(
filenames=[str(f) for f in files],
reade... | [
"def visualize_data(self):\r\n\r\n\t\tx = np.arange(self.tile_width)\r\n\t\ty = np.arange(self.tile_height)\r\n\t\thovertext = []\r\n\r\n\t\tcounts_list = np.zeros(len(self.data_list))\r\n\t\tfiles_list = np.empty(len(self.data_list), dtype = 'object')\r\n\r\n\t\tfor index, item in enumerate(self.data_list):\r\n\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show a video. Show a video with ABI MESO and GLM L2 C14_flash_extent_density. | def show_video_abi_glm(
files, out_dir,
img_out="{name:s}-{start_time:%Y%m%d_%H%M}.tiff",
vid_out="{name:s}-{start_time:%Y%m%d_%H%M}-"
"{end_time:%Y%m%d_%H%M}.mp4",
scene_kwargs={}):
(ms, mr) = scutil.get_resampled_multiscene(
files,
reader=["g... | [
"def Video(self):\n self = self._cam._AcqMode.Video\n self.__call__(start=False)",
"def show_video(req):\n board = req.user.board\n\n image_link = board.image_link()\n mid = str(board.mid)\n # if mid < 10:\n # mid = '0'+str(mid) \n # print 'mid',mid\n#\timage_link = board.image_link... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show a ABI/GLM video between start_date and end_date. | def show_video_abi_glm_times(
start_date, end_date, out_dir,
img_out="{platform_name}-{sensor}-{name}-"
"{start_time:%Y%m%d%H%M%S}-{end_time:%Y%m%d%H%M%S}.tif",
vid_out="{platform_name}-{name}-{area.area_id}-"
"{start_time:%Y%m%d%H%M%S}-{end_time:%Y%m%d%H%M%S}.mp4... | [
"def show_video_abi_glm(\n files, out_dir,\n img_out=\"{name:s}-{start_time:%Y%m%d_%H%M}.tiff\",\n vid_out=\"{name:s}-{start_time:%Y%m%d_%H%M}-\"\n \"{end_time:%Y%m%d_%H%M}.mp4\",\n scene_kwargs={}):\n (ms, mr) = scutil.get_resampled_multiscene(\n files,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run an ipynb and produce an HTML output | def run_ipynb(filepath):
filename = os.path.basename(filepath)
cmd = ('jupyter-nbconvert', '--to', 'html', '--execute',
'--ClearOutputPreprocessor.enabled=True', filepath, '--output',
filename)
subprocess.check_call(cmd) | [
"def test_notebook(path):\n import nbconvert\n print('Running ' + path + ' ... ', end='')\n sys.stdout.flush()\n\n # Load notebook, convert to python\n e = nbconvert.exporters.PythonExporter()\n code, __ = e.from_filename(path)\n\n # Remove coding statement, if present\n ipylines = ['ipython... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Strip a notebook of its output in place | def clear_ipynb(filepath):
filename = os.path.basename(filepath)
cmd = ('jupyter-nbconvert', '--inplace', '--to', 'notebook',
'--ClearOutputPreprocessor.enabled=True', filepath, '--output',
filename)
subprocess.check_call(cmd) | [
"def strip_output(nb):\n keys = {'metadata': [], 'cell': {'metadata': [\"execution\"]}}\n\n nb.metadata.pop('signature', None)\n nb.metadata.pop('widgets', None)\n\n for field in keys['metadata']:\n pop_recursive(nb.metadata, field)\n\n if 'NB_KERNEL' in os.environ:\n nb.metadata['kerne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
bitmap_to_mat takes a list of image filenames and returns a numpy 4D array of those images; dtype is uint8 and the matrix structure is (h, w, t) | def bitmap_to_mat(bitmap_seq, grayscale=True):
image_count = len(bitmap_seq)
shape = None
count = 0
for bitmap_file in bitmap_seq:
img = cv2.imread(bitmap_file)
if grayscale:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if shape is None: # first image read
... | [
"def bitmaps_to_matrix(bitmaps):\n bitmap_me, bitmap_other = bitmaps\n array_me = _bitmap_to_array(bitmap_me)\n array_other = _bitmap_to_array(bitmap_other)\n array = _merge_arrays(array_me, array_other)\n matrix = _array_to_matrix(array)\n return matrix",
"def read_images(fn_list):\r\n batch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Import video using dtype=np.float64 to allow normalizing; use np.uint8 if not needed. Sort frames in ascending number order because glob can return filenames in any order. video_data is in [t, h, w] order and is a C-order array; ImData0 is in [h, w, t] order and is a Fortran-order array. | def import_video_as_frames(path, start, end, file_ending="jpg", work_type=np.float64):
frames_list = glob.glob(path + '*.'+file_ending)
frames_list.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
frames_list = frames_list[start:end+1]
print(f"first to load: {frames_list[0]}, last to load: {fram... | [
"def convert_frames_to_video(self, pathIn,pathOut):",
"def load_video(self, path):\n \n try:\n vid = imageio.get_reader(path, 'ffmpeg')\n except:\n raise Exception('Could not load the following video, as it may be corrupt: {}'.format(path))\n \n if not os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
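A quick illustration, with made-up filenames, of why the numeric sort key in the row above is needed: lexicographic sorting would misorder frames.

```python
frames = ["frame10.jpg", "frame2.jpg", "frame1.jpg"]
# sorted(frames) would give frame1, frame10, frame2
frames.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
# -> ['frame1.jpg', 'frame2.jpg', 'frame10.jpg']
```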
load a numpy array from a binary file (filename) and arrange it into an array with the provided dimensions and data type | def load_mat_from_bin(filename, dtype, shape):
f = open(filename, 'rb')
byte_array = f.read()
f.close()
np_array = np.frombuffer(byte_array, dtype=dtype)
np_array = np_array.reshape(shape)
return np_array | [
"def fread(f, n, dtype):\n if dtype is np.str:\n dt=np.uint8\n else:\n dt=dtype\n \n data_array=np.fromfile(f, dt, n)\n #data_array.shape=(n,1)\n return data_array",
"def read_array(filename, dtype, separator='\\t'):\n cast = numpy.cast\n data = [[] for dummy in xrange(len(dtype... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
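For context, a small round-trip example for the loader above, assuming the binary file was produced with numpy's raw `tofile` (the filename `example.bin` is illustrative):

```python
import numpy as np

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
arr.tofile("example.bin")                # raw bytes, no header
restored = load_mat_from_bin("example.bin", np.float64, (2, 3))
assert np.array_equal(arr, restored)
```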
Normalize image so that (min, max) -> (0, 1) | def normalizeImage(image):
image -= np.min(image)
image *= 1.0 / np.max(image) | [
"def normalize(img, new_min=0, new_max=255):\n old_min = np.min(img)\n old_max = np.max(img)\n if old_min == old_max:\n return img - old_min # return 0s\n img = (img - old_min) * ((new_max - new_min) / (old_max - old_min)) + new_min\n return img",
"def normalize(img,max_=255.0):\n img -= i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
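A short usage note: normalizeImage modifies the array in place and returns nothing, so it is assumed the caller passes a float array (in-place division on an integer array would fail):

```python
import numpy as np

img = np.array([[10, 20], [30, 50]], dtype=np.float64)
normalizeImage(img)
# img is now [[0.0, 0.25], [0.5, 1.0]]: min mapped to 0, max mapped to 1
```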
checks if the rectangle [x1,y1,w1,h1] is contained in [x2,y2,w2,h2] | def contained_in(cc1, cc2):
x2, y2, w2, h2 = cc2
x1, y1, w1, h1 = cc1
if x2 < x1 and y2 < y1 and x1 + w1 < x2 + w2 and y1 + h1 < y2 + h2:
return True
return False | [
"def check_overlap(l1_x, l1_y, r1_x, r1_y, l2_x, l2_y, r2_x, r2_y):\r\n# If one rectangle is on total left side of other\r\n if bool(l1_x > r2_x) ^ bool(l2_x > r1_x):\r\n return False\r\n# If one rectangle is above other\r\n if bool(l1_y < r2_y) ^ bool(l2_y < r1_y):\r\n return False\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
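A tiny usage check for the strict-containment test above; rectangles are given as (x, y, w, h):

```python
# the 3x3 box at (2, 2) sits strictly inside the 10x10 box at (0, 0)
assert contained_in((2, 2, 3, 3), (0, 0, 10, 10))
assert not contained_in((0, 0, 10, 10), (2, 2, 3, 3))
```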
Given a number of labels, the labels np array, and stats from cv2.connectedComponentsWithStats, this function returns a new labels array and a dictionary of labels and areas such that all nested CCs have the same label and count towards the same area | def unite_nestedCCs(num_labels, labels, stats):
cc_dict = {}
nested_cc_list = [] # (1, 2) is in nested_cc_list iff cc 1's bbox is contained in cc 2's bbox
for i in range(0, num_labels):
# i ==0 is the background cc by convention. ignore
if i == 0:
continue
# extract s... | [
"def mapillary_label_building(filtered_image, label_ids):\n image_data = np.array(filtered_image)\n available_labels = np.unique(image_data)\n return {i: 1 if i in available_labels else 0 for i in label_ids}",
"def connectedComponentsLabelling(mask, connectivity=1, k=1):\n labels = skimage.measure.lab... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
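For readers unfamiliar with the stats array referenced above, a minimal sketch (function name illustrative) of pulling per-component bounding boxes out of cv2.connectedComponentsWithStats, which is the shape of data the truncated document iterates over:

```python
import cv2
import numpy as np

def cc_bounding_boxes(binary_img: np.ndarray) -> dict:
    # Label 0 is the background by convention and is skipped.
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(
        binary_img.astype(np.uint8), connectivity=8)
    return {i: (stats[i, cv2.CC_STAT_LEFT], stats[i, cv2.CC_STAT_TOP],
                stats[i, cv2.CC_STAT_WIDTH], stats[i, cv2.CC_STAT_HEIGHT])
            for i in range(1, num_labels)}
```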
This function takes a binary sparse array and filters it such that no object smaller than size_thresh remains | def filter_sparse_map(sparse_array, size_thresh=None):
connectivity = 8
if size_thresh is None:
size_thresh = (sparse_array.shape[0] * sparse_array.shape[1]) // 200
result_sparse_array = np.zeros_like(sparse_array)
for i in range(sparse_array.shape[2]):
num_labels, labels, stats, centro... | [
"def removeSmallBlobs( matrix, max_size ):\n \n # create a new output matrix\n label_matrix = zeros( matrix.shape )\n\n # use label() to find the connected sections of the input matrix\n labels, count = label( matrix, structure=ones( (3,3) ) )\n\n # only include blobs of size larger than max_size in the outpu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a boolean variable that is True at each sample that exceeds the velocity threshold and is not during an ITI period | def valid_samples(self, velocity_threshold=5, **kwargs):
return (~ self.iti(**kwargs)) \
& (self.velocity(**kwargs) > velocity_threshold) | [
"def water_test(self):\n th_ndvi_A = 0.01\n th_nir_A = 0.11\n th_ndvi_B = 0.1\n th_nir_B = 0.05\n\n return (((self.ndvi < th_ndvi_A) & (self.nir < th_nir_A)) |\n ((self.ndvi < th_ndvi_B) & (self.nir < th_nir_B)))",
"def larger_than_min_t(self):\n total_t = len(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a new great_expectations directory and DataContext object in the provided project_root_dir. `create` will create a new "great_expectations" directory in the provided folder, provided one does not already exist. Then, it will initialize a new DataContext in that folder and write the resulting config. | def create(cls, project_root_dir=None):
if not os.path.isdir(project_root_dir):
raise ge_exceptions.DataContextError(
"The project_root_dir must be an existing directory in which "
"to initialize a new DataContext"
)
ge_dir = os.path.join(project... | [
"def create_project_dirs() -> None:\n create_folder('cache/data')\n create_folder('cache/models')",
"def create_folder_structure(root_dir: Path):\n (root_dir / \"test\").mkdir(parents=True, exist_ok=True)\n (root_dir / \"train\").mkdir(exist_ok=True)\n (root_dir / \"val\").mkdir(exist_ok=True)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if all uncommitted directories exist. | def all_uncommitted_directories_exist(cls, ge_dir):
uncommitted_dir = os.path.join(ge_dir, "uncommitted")
for directory in cls.UNCOMMITTED_DIRECTORIES:
if not os.path.isdir(os.path.join(uncommitted_dir, directory)):
return False
return True | [
"def _is_incremental_dir(directory):\n walker = os.walk(directory)\n root, dirs, files = next(walker)\n assert root == directory\n if len(dirs) == 0 and len(files) == 0:\n return True\n if len(files) != 1 or files[0] != 'dataset.db':\n return False\n ok = ['loads', 'merges']\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the config_variables.yml file exists. | def config_variables_yml_exist(cls, ge_dir):
path_to_yml = os.path.join(ge_dir, cls.GE_YML)
# TODO this is so brittle and gross
with open(path_to_yml, "r") as f:
config = yaml.load(f)
config_var_path = config.get("config_variables_file_path")
config_var_path = os.pat... | [
"def check_vars_set():\n missing_vars = []\n\n for var in REQUIRED_VARS:\n if os.environ.get(var, None) is None:\n missing_vars.append(var)\n\n if len(missing_vars) > 0:\n print(\"Missing variables found: {missing_vars}\".format(\n missing_vars=missing_vars))\n\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy custom data docs templates | def scaffold_custom_data_docs(cls, plugins_dir):
styles_template = file_relative_path(
__file__, "../render/view/static/styles/data_docs_custom_styles_template.css")
styles_destination_path = os.path.join(
plugins_dir, "custom_data_docs", "styles", "data_docs_custom_styles.css")
... | [
"def add_context_data(app, pagename, templatename, context, doctree):\n context['site'] = app.site_data\n # The translation context is pinned to the Italian sources, as Sphinx has\n # it's own translation mechanism built in\n if 'language' in context and context['language'] != None:\n language = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy template notebooks into the notebooks directory for a project. | def scaffold_notebooks(cls, base_dir):
template_dir = file_relative_path(__file__, "../init_notebooks/")
notebook_dir = os.path.join(base_dir, "notebooks/")
for subdir in cls.NOTEBOOK_SUBDIRECTORIES:
subdir_path = os.path.join(notebook_dir, subdir)
for notebook in glob.gl... | [
"def copy_notebooks(\n path_root: str,\n docs_root: str = \"docs/source\",\n path_docs_ipynb: str = \"notebooks\",\n path_docs_images: str = \"_static/images\",\n ):\n ls_ipynb = []\n for sub in ([\"*.ipynb\"], [\"**\", \"*.ipynb\"]):\n ls_ipynb += glob.glob(o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
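The scaffold_notebooks row is truncated; as a hedged, generic sketch of the copy step it describes (the names here are illustrative, not the great_expectations API):

```python
import glob
import os
import shutil

def copy_notebook_templates(template_dir: str, notebook_dir: str) -> None:
    # Copy every *.ipynb template into the project's notebooks directory.
    os.makedirs(notebook_dir, exist_ok=True)
    for src in glob.glob(os.path.join(template_dir, "*.ipynb")):
        shutil.copyfile(src, os.path.join(notebook_dir, os.path.basename(src)))
```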
Initialize all Stores for this DataContext. | def _init_stores(self, store_configs):
for store_name, store_config in store_configs.items():
self.add_store(
store_name,
store_config
) | [
"def init_datastores(self):\n self.data = DatastoreLegacy(self.id_)\n self.data.subscribe(self.data_change)\n self.class_data = DatastoreLegacy(type(self).__name__)\n self.class_data.subscribe(self.class_data_change)",
"def populate_stores(self, stores):\n for self_store, other_stor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new ValidationOperator to the DataContext and (for convenience) return the instantiated object. | def add_validation_operator(self, validation_operator_name, validation_operator_config):
self._project_config["validation_operators"][validation_operator_name] = validation_operator_config
self._project_config_with_variables_substituted["validation_operators"][validation_operator_name] = \
... | [
"def _add_logical_operator(self, operator):\n if not self.c_oper:\n raise QueryExpressionError(\"Logical operators must be preceded by a expression\")\n\n self.current_field = None\n self.c_oper = None\n\n self.l_oper = inspect.currentframe().f_back.f_code.co_name\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get file urls for all built local data docs. | def get_existing_local_data_docs_sites_urls(self):
from great_expectations.data_context.store import FixedLengthTupleFilesystemStoreBackend
ge_dir = os.path.abspath(self.root_directory)
sites = self.get_project_config().get("data_docs_sites")
existing_sites = []
for site_name, ... | [
"def urls(self) -> List[str]:\n return [file_.path for file_ in self.files.all()]",
"def data_urls(self):\n return [dinfo.data_url for dinfo in self.datainfo]",
"def get_all_document_uris(self, dataset_name: str) -> List[str]:\n uris = []\n doc_dump_dir = utils.get_documents_dump_dir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Configurable delimiter character used to parse data asset name strings into ``NormalizedDataAssetName`` objects. | def data_asset_name_delimiter(self):
return self._data_asset_name_delimiter | [
"def data_asset_name_delimiter(self, new_delimiter):\n if new_delimiter not in ALLOWED_DELIMITERS:\n raise ge_exceptions.DataContextError(\"Invalid delimiter: delimiter must be one of: {}\".format(ALLOWED_DELIMITERS))\n else:\n self._data_asset_name_delimiter = new_delimiter",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
data_asset_name_delimiter property setter method | def data_asset_name_delimiter(self, new_delimiter):
if new_delimiter not in ALLOWED_DELIMITERS:
raise ge_exceptions.DataContextError("Invalid delimiter: delimiter must be one of: {}".format(ALLOWED_DELIMITERS))
else:
self._data_asset_name_delimiter = new_delimiter | [
"def setFieldDelimiter(self,delimiter):\n self._fieldDelimiter = delimiter;",
"def define_delimiter(self, delimiter):\n if delimiter.upper() not in self.VALID_DELIMITERS:\n raise AssertionError(\"Invalid delimiter. Delimiter must be COMMA, PIPE, TAB, COLON, or SEMICOLON.\")\n else... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the path where the project-normalized data_asset_name expectations are stored. This method is used internally for constructing all absolute and relative paths for asset_name-based paths. | def _get_normalized_data_asset_name_filepath(self, data_asset_name,
expectation_suite_name,
base_path=None,
file_extension=".json"):
if base_path is None:
... | [
"def get_base_data_path() -> str:\n return os.path.realpath(\n os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\", \"data\")\n )\n )",
"def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yields the next batch_kwargs for the provided data_asset_name, supplemented by any kwargs provided inline. | def yield_batch_kwargs(self, data_asset_name, **kwargs):
if not isinstance(data_asset_name, NormalizedDataAssetName):
data_asset_name = self.normalize_data_asset_name(data_asset_name)
datasource = self.get_datasource(data_asset_name.datasource)
generator = datasource.get_generator(d... | [
"def build_batch_kwargs(self, data_asset_name, *args, **kwargs):\n if isinstance(data_asset_name, (NormalizedDataAssetName, DataAssetIdentifier)):\n generator_name = data_asset_name.generator\n generator_asset = data_asset_name.generator_asset\n elif len(self._datasource_config[\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds batch kwargs for the provided data_asset_name, using an optional partition_id or building from provided kwargs. build_batch_kwargs relies on the generator's implementation | def build_batch_kwargs(self, data_asset_name, partition_id=None, **kwargs):
if not isinstance(data_asset_name, (NormalizedDataAssetName, DataAssetIdentifier)):
data_asset_name = self.normalize_data_asset_name(data_asset_name)
datasource = self.get_datasource(data_asset_name.datasource)
... | [
"def named_generator_build_batch_kwargs(self, generator_name, generator_asset, partition_id=None, **kwargs):\n generator = self.get_generator(generator_name=generator_name)\n if partition_id:\n batch_kwargs = generator.build_batch_kwargs_from_partition_id(\n generator_asset=g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a batch of data, using the namespace of the provided data_asset_name. | def get_batch(self, data_asset_name, expectation_suite_name, batch_kwargs=None, **kwargs):
normalized_data_asset_name = self.normalize_data_asset_name(data_asset_name)
datasource = self.get_datasource(normalized_data_asset_name.datasource)
if not datasource:
raise ge_exceptions.Data... | [
"def get_batch(self, data_asset_name, expectation_suite_name, batch_kwargs, **kwargs):\n if isinstance(data_asset_name, NormalizedDataAssetName): # this richer type can include more metadata\n if self._data_context is not None:\n expectation_suite = self._data_context.get_expectati... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run a validation operator to validate data assets and to perform the business logic around validation that the operator implements. | def run_validation_operator(
self,
validation_operator_name,
assets_to_validate,
run_id=None,
):
return self.validation_operators[validation_operator_name].run(
assets_to_validate=assets_to_validate,
run_id=run_id,
) | [
"def run_validation(self, data=empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n\n value = self.to_internal_value(data)\n try:\n self.run_validators(value)\n value = self.validate(value)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new datasource to the data context, with configuration provided as kwargs. | def add_datasource(self, name, initialize=True, **kwargs):
logger.debug("Starting ConfigOnlyDataContext.add_datasource for %s" % name)
if "generators" not in kwargs:
logger.warning("Adding a datasource without configuring a generator will rely on default "
"generat... | [
"def Datasource(**keywords):\n\n return CreateDatasource(keywords)",
"def add_datasource(pltid):\n user = root.authorized()\n if user != 'admin':\n return template('error', err=\"must be admin to edit plots\")\n app = request.forms.app\n r = request.forms\n datasource.insert(pltid=pltid, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the named datasource | def get_datasource(self, datasource_name="default"):
if datasource_name in self._datasources:
return self._datasources[datasource_name]
elif datasource_name in self._project_config_with_variables_substituted["datasources"]:
datasource_config = copy.deepcopy(
self.... | [
"def get_datasource(self, datasource_name: str) -> Datasource:\n try:\n datasource: Datasource\n return list(\n filter(\n lambda datasource: datasource.name == datasource_name,\n self.datasources,\n )\n )[0]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of available expectation suite keys. | def list_expectation_suite_keys(self):
keys = self.stores[self.expectations_store_name].list_keys()
return keys | [
"def list_available_expectation_types(self):\n keys = dir(self)\n return [\n expectation for expectation in keys if expectation.startswith(\"expect_\")\n ]",
"def keys(self):\n return [requirement.key for requirement in self]",
"def get_available_tests(self):\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalizes data_asset_names for a data context. A data_asset_name is defined per project and consists of three components that together define a "namespace" for data assets, encompassing both expectation suites and batches. Within a namespace, an expectation suite effectively defines candidate "types" for batches of dat... | def normalize_data_asset_name(self, data_asset_name):
if isinstance(data_asset_name, NormalizedDataAssetName):
return data_asset_name
elif isinstance(data_asset_name, DataAssetIdentifier):
return NormalizedDataAssetName(
datasource=data_asset_name.datasource,
... | [
"def _get_normalized_data_asset_name_filepath(self, data_asset_name,\n expectation_suite_name,\n base_path=None,\n file_extension=\".json\"):\n if base_path is None:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a named expectation suite for the provided data_asset_name. | def get_expectation_suite(self, data_asset_name, expectation_suite_name="default"):
if not isinstance(data_asset_name, NormalizedDataAssetName):
data_asset_name = self.normalize_data_asset_name(data_asset_name)
key = ExpectationSuiteIdentifier(
data_asset_name=DataAssetIdentifie... | [
"def normalize_data_asset_name(self, data_asset_name):\n\n if isinstance(data_asset_name, NormalizedDataAssetName):\n return data_asset_name\n elif isinstance(data_asset_name, DataAssetIdentifier):\n return NormalizedDataAssetName(\n datasource=data_asset_name.data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save the provided expectation suite into the DataContext. | def save_expectation_suite(self, expectation_suite, data_asset_name=None, expectation_suite_name=None):
if data_asset_name is None:
try:
data_asset_name = expectation_suite['data_asset_name']
except KeyError:
raise ge_exceptions.DataContextError(
... | [
"def add_suite(self, suite):\n suite.parent_suite = self\n self._suites.append(suite)",
"def _initialize_expectations(\n self,\n expectation_suite: ExpectationSuite = None,\n expectation_suite_name: str = None,\n ):\n # Checking type of expectation_suite.\n # Ch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches all validation parameters for a given run_id. | def get_parameters_in_evaluation_parameter_store_by_run_id(self, run_id):
if self.evaluation_parameter_store.has_key(run_id):
return copy.deepcopy(
self.evaluation_parameter_store.get(run_id)
)
else:
return {} | [
"def get_runs_by_id(self, config_id):\n\t\td = self.data[config_id]\n\n\t\truns = []\n\t\tfor b in d.results.keys():\n\t\t\ttry:\n\t\t\t\terr_logs = d.exceptions.get(b, None)\n\n\t\t\t\tif d.results[b] is None:\n\t\t\t\t\tr = Run(config_id, b, None, None , d.time_stamps[b], err_logs)\n\t\t\t\telse:\n\t\t\t\t\tr = R... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compiles all current expectation configurations in this context to be ready for result registration. | def _compile(self):
# Full recompilation every time
self._compiled_parameters = {
"raw": set(),
"data_assets": {}
}
for key in self.stores[self.expectations_store_name].list_keys():
config = self.stores[self.expectations_store_name].get(key)
... | [
"def compile(self):\n if self.__compiler is None:\n self.__compiler = Compiler();\n\n if self.__trackResources:\n for cpass in self.__compiler.getPassConfig().getPasses():\n self.addObjectResource(cpass);\n\n self.__compiler.compile(self);\n\n self.__... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get validation results from a configured store. | def get_validation_result(
self,
data_asset_name,
expectation_suite_name="default",
run_id=None,
validations_store_name="validations_store",
failed_only=False,
):
selected_store = self.stores[validations_store_name]
if not isinstance(data_asset_name, ... | [
"def validation_results(self) -> Dict[int, Dict[str, ValidatorOutput]]:\n return self.scene_validation_results",
"def check_results(self):\n\n if self.results is None:\n return self.catalog\n\n else:\n return self.results",
"def get_profile_validators(successful_retrie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Profile the named datasource using the named profiler. | def profile_datasource(self,
datasource_name,
generator_name=None,
data_assets=None,
max_data_assets=20,
profile_all_data_assets=True,
profiler=BasicDatasetPr... | [
"def _profile(self):\n if self.args.profile == \"console\":\n self._console_profile(self._main)\n else:\n cProfile.runctx(\"self._main()\", locals(), locals(), self.args.profile)",
"def profile(self, profile=None):\n if profile is None:\n return int(self._conn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for the yml file starting here and moving upward. | def find_context_yml_file(cls, search_start_dir=os.getcwd()):
yml_path = None
for i in range(4):
logger.debug("Searching for config file {} ({} layer deep)".format(search_start_dir, i))
potential_ge_dir = os.path.join(search_start_dir, cls.GE_DIR)
if os.path.isdir(... | [
"def find_config_file(src: Path) -> Path:\n parents = src.resolve().parents\n paths_to_search_in = [src] + list(parents) if src.is_dir() else parents\n for path in paths_to_search_in:\n if (path / \"sniptly.toml\").exists():\n return path / \"sniptly.toml\"\n raise FileNotFoundError(\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
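The find_context_yml_file row is truncated; a generic sketch of the upward-search pattern it describes, not the great_expectations implementation itself:

```python
import os

def find_upward(filename: str, start_dir: str, max_depth: int = 4):
    # Look in start_dir and in up to max_depth parent directories.
    current = os.path.abspath(start_dir)
    for _ in range(max_depth):
        candidate = os.path.join(current, filename)
        if os.path.isfile(candidate):
            return candidate
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root
            return None
        current = parent
    return None
```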
Sets the active_player of this Arena. | def active_player(self, active_player: int):
self._active_player = active_player | [
"def set_active(self, player: Player) -> None:\n self._scoreboxes[player].set_active()",
"def active(self, active):\n \n self._active = active",
"def handle_set_now_playing_player(self, player: pb.NowPlayingPlayer) -> None:\n self.active_player = self.get_player(player)\n\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the bounds of this Arena. | def bounds(self) -> Bounds:
return self._bounds | [
"def bounds(self):\n raise NotImplementedError",
"def verifiable_bounds(self):\n return self._verifiable_bounds",
"def getBounds(self, *args):\n return _coin.SbBox2d_getBounds(self, *args)",
"def get_bounds(self):\n # retrieve the current center position\n position = self.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the bounds of this Arena. | def bounds(self, bounds: Bounds):
self._bounds = bounds | [
"def update_bounds(self, bounds):\n self.bounds = bounds",
"def setBounds_0(self, bounds):",
"def set_bounds(var, varname):\n var.attrs[boundsvar] = varname",
"def set_bounds(self):\n latitude_min=sys.float_info.max #set smalest/largest possible value to compare against\n latitude_max=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the terrain of this Arena. | def terrain(self) -> TerrainGrid:
return self._terrain | [
"def terrainAt(self, coords):\n tile = self.tileAt(coords)\n if tile is not None:\n return tile.terrainAt(coords-tileCenter(coords))\n else: return None",
"def get_terrain_rgb(self, terrain, elev):\n if self.mode == \"elevmap\" and terrain != \"~\":\n return rgb_v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the terrain of this Arena. | def terrain(self, terrain: TerrainGrid):
self._terrain = terrain | [
"def setTerrainCost(self, terrain, cost):\n self.match.stack.push(stack.Call(self._setTerrainCost, terrain, cost),\n stack.Call(self._setTerrainCost, terrain, self.terrainCosts.get(terrain)))",
"def terrain(self) -> TerrainGrid:\n return self._terrain",
"def __init__(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the effects of this Arena. | def effects(self, effects: EffectGrid):
self._effects = effects | [
"def impact_effects(self, impact_effects):\n\n self._impact_effects = impact_effects",
"def effects(self,infostate,knowledge):\n Move.effects(self,infostate)",
"def apply(self, player):\n player.add_effect(*self.effects)",
"def setEffect(self, effect):\n self.listener.sendData(\"%s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the robots of this Arena. | def robots(self) -> List[Robot]:
return self._robots | [
"def get(self):\n return retrieve_robot_list()",
"def get_robots():\n ret_bots = {} # Robot dictionary\n for num in range(NUM_ROBOTS):\n ret_bots[num] = create_robot()\n # clear prompt\n robot.prompt = \"\"\n return ret_bots",
"def robots(self):\n self.login()\n respo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the robots of this Arena. | def robots(self, robots: List[Robot]):
self._robots = robots | [
"def set_robot(self, robot: robot_base.RobotBase):\n self._robot = robot",
"def update_robots(self):\n for robot_item in self.get_robot_graphics_items():\n robot_item.updateAll()",
"def robots(self) -> List[Robot]:\n return self._robots",
"def __setup_robots(self, robot_locations, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add query methods to `Queries` instance. | def add_queries(self, queries: List[QueryFn]):
for fn in queries:
query_name = fn.__name__.rpartition(".")[2]
self.add_query(query_name, MethodType(fn, self)) | [
"def select_query(self, queries):\n\n raise NotImplementedError",
"def add_query(self, query, *args):\n self.queries.append((query, args))",
"def query(self, query):\n self.operation += query.operation\n self.parameters += query.parameters",
"def _get_queries(self, migration, metho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
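A minimal, self-contained sketch of the `MethodType` binding that add_queries performs; the `Queries` class and `get_users` function here are stand-ins, not the library's own:

```python
from types import MethodType

class Queries:
    def add_query(self, name, method):
        # Attach an already-bound method under the given name.
        setattr(self, name, method)

def get_users(self):
    return "SELECT * FROM users"   # stand-in for a generated query function

q = Queries()
q.add_query("get_users", MethodType(get_users, q))  # bind the function to q
print(q.get_users())                                 # -> SELECT * FROM users
```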
Load Queries from a list of `QueryDatum` | def load_from_list(self, query_data: List[QueryDatum]):
for query_datum in query_data:
self.add_queries(_create_methods(query_datum, self.is_aio))
return self | [
"def load_query_samples(self, sample_index_list):\n pass",
"def load_joint_queries(name_list, path, lb, ub):\n ut_dict_list = [] # return1: query-dictionary list\n \n load_num = ub-lb+1 # find inclusive interval\n for name in name_list:\n ut_dict = {}\n\n for idx in range(load_num... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load Queries from a `QueryDataTree` | def load_from_tree(self, query_data_tree: QueryDataTree):
for key, value in query_data_tree.items():
if isinstance(value, dict):
self.add_child_queries(key, Queries(self.driver_adapter).load_from_tree(value))
else:
self.add_queries(_create_methods(value, s... | [
"def load_parsed(self):\n self.connect()\n cursor = self.execute(\"SELECT id, text FROM queries\")\n for row in cursor.fetchall():\n d = { k:row[k] for k in row.keys() }\n parsetree = parse_query(row[\"text\"])\n if parsetree is not None:\n logger... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tmax, Tmin, and Tavg for list of dates | def calc_temps(start_date, end_date):
return session.query(func.max(Measurement.tobs), func.min(Measurement.tobs), func.avg(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() | [
"def get_avg_min_and_max_temps(self, dates_and_temps):\n data = []\n\n for key in dates_and_temps:\n total_tmax, total_tmin, max_days, min_days = dates_and_temps[key]\n data.append({\n \"date\": key,\n \"tmax\": round(total_tmax / max_days) if max_da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
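A usage note for the row above, assuming the SQLAlchemy `session` and `Measurement` model it references are in scope; the date strings and result values are illustrative:

```python
results = calc_temps("2017-01-01", "2017-01-07")
# .all() returns a one-element list of (Tmax, Tmin, Tavg) for the inclusive range,
# e.g. [(87.0, 62.0, 74.1)]
tmax, tmin, tavg = results[0]
```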
dumps a CSV file with the summary of occupancies | def dumpCSVOccupancy(momentSummary,sensorPos,outdir):
xbias=0.5
import csv
fOut=open(os.path.join(outdir,'occupancy_summary.dat'),'w')
csv_writer = csv.writer(fOut, delimiter=',')
for waferKey,counts in sorted(momentSummary.items(), key=lambda x: x[0]):
isd,ilay,iu,iv=waferKey
... | [
"def make_enrollment_csv(self):\n c_all = cont.Contact.objects.all()\n\n enrollment_counts = collections.OrderedDict()\n\n for c in c_all:\n key = c.created.strftime('%Y-%U')\n try:\n enrollment_counts[key][c.facility] += 1\n except KeyError as e:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
builds the longitudinal profile summary for a particular wafer | def getWaferUVSummary(momentSummary,sensorPos,proc,waferLongProfiles):
waferUVSummary={}
xbias=0.5
#loop over each sub-detector layer
subdets=set([x[0] for x in sensorPos])
for sd in subdets:
for waferKey in momentSummary:
isd,ilay,iu,iv=waferKey
if isd != sd:... | [
"def calculate_stats():\n nonlocal convert_nmd_tuple\n \n oldest_person_age_days = max(profile_df['age'])\n avg_age_days = profile_df['age'].mean()\n mean_current_location = profile_df['lat'].mean(),profile_df['long'].mean()\n largest_blood_type = profile_df['blood_group'].... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the attribute dictionaries | def createAttrDicts():
ret = {}
# lfw v1.1
ret['lfw_v1.1'] = d = {}
fields = getmodelfields('lfw_v1.1')
for l in open('attrnames.txt'):
num, name = l.strip().split('\t', 1)
if name not in fields: continue
d[num] = d[int(num)] = d[name] = name
return ret | [
"def build_attributes(self):\n pass",
"def initAttributes(self):\n pass",
"def __compute_attrs(self):\n attributes = {}\n for attr in self.policy.typeattributes():\n attributes[str(attr)] = set(str(x) for x in attr.expand())\n return attributes",
"def create_custom_at... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns similarity features computed from the features for two objects. 'meths' are one or more of 'absdiff', 'diffsq', 'prod', 'avg', 'concat', each paired with a weight value as a 2-tuple. If 'weighted' is > 0, then differences are weighted using a Gaussian of the given variance | def getSimFeaturesFromFvals(fvals1, fvals2, meths):
from array import array
allmeths = {'absdiff': lambda a,b: abs(a-b), 'diffsq': lambda a,b: (a-b)**2, 'prod': lambda a,b: a*b, 'avg': lambda a,b: (a+b)/2.0, 'concat': None}
ret = array('d')
for meth, weighted in meths:
assert meth in allmeths
... | [
"def jaccardSimilarity(method1, method2, weights, nl_sim):\n info_dict = defaultdict(Counter)\n \n # f1 is a conscise way of representing features dictionary for method1\n f1 = method1.features\n f2 = method2.features\n commonFeatures = (f1.keys() & f2.keys()) - set(method.lang_features)\n sim_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Classify multiple items in bulk. Returns (label, value) pairs for each input. | def bulkclassify(model, seqOfFvals):
try:
seqOfFvals = [applyScales(fvals, model.scales) for fvals in seqOfFvals]
except AttributeError: pass
return model.predict_many(seqOfFvals) | [
"def data_classification(self, data=[]):\n data_type = ''\n self.logger.info('Attempting to classify: {0}'.format(data))\n #This section classifies an input as heartbeat, expecting integer\n if len(data) == 1:\n try:\n value = data[0]\n int(value)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the model and parameters for the given model name. Returns (model, simmeths) | def readmodel(model):
if model not in MODELS:
raise web.notfound('No model %s. Choices are: %s' % (model, ', '.join(MODELS)))
modelfname = model+'.model'
from svm import svm_model
t1 = time.time()
model = svm_model(modelfname)
f = open(modelfname.replace('.model', '.params'))
model.s... | [
"def read_model(self):\n f1 = open(self.name + '_' + 'words', 'r')\n f2 = open(self.name + '_' + 'word_lengths', 'r')\n f3 = open(self.name + '_' + 'stems', 'r')\n f4 = open(self.name + '_' + 'sentence_lengths', 'r')\n f5 = open(self.name + '_' + 'word_pair', 'r')\n d_str1 ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the feature vector (as a dict) for the given name in the given dataset | def getfdict(dataset, name):
import simplejson as json
from urllib import urlopen
from pprint import pprint
if dataset not in DATASETS:
raise web.notfound('No dataset with dataset %s. Choices are: %s' % (dataset, ', '.join(DATASETS)))
fname = 'dataset_%s.txt' % (dataset)
if dataset == 'p... | [
"def get_data_vector(self, name=None):\n if name is None:\n return self.get(\"data_vectors\")[0]\n\n for v in self.get(\"data_vectors\"):\n if v[\"name\"] == name:\n break\n return v",
"def feature(self, key, feat=''):\n if feat :\n if self.data.ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verifies a pair using the given model. Each item of the pair is given as (dataset, name). | def verifypair(a, b, model, debug=0):
def log(s):
if debug:
print >>sys.stderr, s
ret = stor(fields=getmodelfields(model), modelid=model, modelname=MODELS[model])
# read feature dicts
fd1 = getfdict(a[0], a[1])
log('fdict1 : %s' % (fd1))
fd2 = getfdict(b[0], b[1])
log('f... | [
"def verify_model(loaded_data, expected, model_type):\n for key in expected:\n if key == 'password' or key == 'wishlist':\n continue\n assert loaded_data[key] == expected[key]\n assert loaded_data['type'] == model_type\n if 'wishlist' in loaded_data:\n for i in range(len(exp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verifies a fvec against an entire dataset using the given model. Results are a sorted list of (score, fname from the dataset). | def verifyall(fvec, dataset, svm_model, simmeths):
fnames, datafvecs = zip(*dataset.items())
# get similarity fvecs
simfvecs = [getSimFeaturesFromFvals(fvec, dfv, simmeths) for dfv in datafvecs]
# compute results
results = [(label*score, fname) for (label, score), fname in zip(bulkclassify(svm_model... | [
"def verifypair(a, b, model, debug=0):\n def log(s):\n if debug:\n print >>sys.stderr, s\n ret = stor(fields=getmodelfields(model), modelid=model, modelname=MODELS[model])\n # read feature dicts\n fd1 = getfdict(a[0], a[1])\n log('fdict1 : %s' % (fd1))\n fd2 = getfdict(b[0], b[1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing user application PUT regenerate endpoint | def test_regenerate_endpoint(self):
res = self.put(self.REG_URN.format(uuid=generate_uuid()))
self.assert404(res)
# import template datasets to DB
app = Application(userId=self.user.id, **datasets.regenerate.APP)
db.session.add(app)
db.session.commit()
res = se... | [
"def test_user_update_o_auth2_application(self):\n pass",
"def test_put_regenerate_secret_key(self):\n app = self.create_oauth_application(user=self.user)\n original_secret = app.client_secret\n\n rsp = self.api_put(get_oauth_app_item_url(app.pk),\n {'regenera... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtains a view from DocuSign. The user will then be redirected to the view url. Uses the information stored in the session to request the view. RETURNS {err, redirect_url} | def get_view():
err = False # No problems so far!
auth = ds_authentication.get_auth()
if auth["err"]:
return {"err": auth["err"], "err_code": auth["err_code"]}
if not embedded_signing_key in session:
return {"err": "Embedded signing information missing from session! Please re-send."}
... | [
"def return_url():\n\n err = False # No problems so far!\n\n # Retrieving our \"state\" about which embedded signing experience has\n # been completed: there are multiple options. iFrames are never needed\n # and should never be used since the DocuSign embedded signing experience\n # needs the entire... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DocuSign redirects here after the person has finished signing. Query parameter "event" is supplied by DocuSign. RETURNS {err, html} | def return_url():
err = False # No problems so far!
# Retrieving our "state" about which embedded signing experience has
# been completed: there are multiple options. iFrames are never needed
# and should never be used since the DocuSign embedded signing experience
# needs the entire screen, espec... | [
"def event(request, index_id, event_id):\n context = {\"index_id\": index_id, \"event_id\": event_id}\n return render(request, 'event.html', context)",
"def return_event_profile():\n\n # eventname = session['event']\n\n event = crud.return_dance_event(eventname)\n\n\n return render_template('event... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch the envelope status from DocuSign | def get_status(envelope_id):
# Sample data returned from the Envelopes: Get method
# {
# "status": "completed",
# "documentsUri": "/envelopes/ed400d38-7765-4ce5-9f50-8652a8c4486d/documents",
# "recipientsUri": "/envelopes/ed400d38-7765-4ce5-9f50-8652a8c4486d/recipients",
# "enve... | [
"def documents_status(self,\r\n document_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/docume... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a document from DocuSign | def get_doc():
err = False # No problems so far!
auth = ds_authentication.get_auth()
if auth["err"]:
return {"err": auth["err"], "err_code": auth["err_code"]}
uri = request.args.get("url")
fn = request.args.get("fn")
if not uri:
return {"err": "query parameter url is missing!"}... | [
"def get_document(doc):\n try:\n doc = db.collection(\"parameters\").document(doc).get()\n return doc.to_dict()\n except Exception as e:\n error = f\"Error during 'firedb.get_document()' execution: {e}\"\n tg.send_error_message(error)",
"def retrieve_document(doc_id):\n\n db =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
move_to_extern_if takes a written archive file and writes to extern those video clips that satisfy a certain header condition. | def move_to_extern_if(output, condition, output_extern, threads=None):
if threads == None:
# this isn't optimal because we could easily skip over groups
pre_parsed = {file: file_get(file) for file in _all_files(output)}
else:
pre_parsed = threads.map(file_get, _all_files(output))
rt... | [
"def move_from_extern_if(output, condition, threads=None):\n if threads == None:\n pre_parsed = {file_get(file) for file in _all_files(output)}\n else:\n pre_parsed = threads.map(file_get, _all_files(output))\n\n rtn = []\n for (header, ref_file, is_extern, _) in pre_parsed.items():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
move_from_extern_if takes a written archive file and moves back from extern those video clips that satisfy a certain header condition. | def move_from_extern_if(output, condition, threads=None):
if threads == None:
pre_parsed = {file_get(file) for file in _all_files(output)}
else:
pre_parsed = threads.map(file_get, _all_files(output))
rtn = []
    for (header, ref_file, is_extern, _) in pre_parsed:  # pre_parsed holds tuples, not a dict
if conditi... | [
"def move_to_extern_if(output, condition, output_extern, threads=None):\n if threads == None:\n # this isn't optimal because we could easily skip over groups\n pre_parsed = {file: file_get(file) for file in _all_files(output)}\n else:\n pre_parsed = threads.map(file_get, _all_files(output... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
read_if takes a written archive file and reads only those video clips that satisfy a certain header condition. | def read_if(output, condition, clip_size=5, scratch = DEFAULT_TEMP, threads=None):
#read the meta data
seg_start_data = read_block(add_ext(output, '.start'))
clips = clip_boundaries(seg_start_data['start'],\
seg_start_data['end'],\
clip_size)
... | [
"def _parseLISTmovi(self, size, file):\n i = 0\n n_dc = 0\n done = False\n # If the VOL header doesn't appear within 5MB or 5 video chunks,\n # give up. The 5MB limit is not likely to apply except in\n # pathological cases.\n while i < min(1024 * 1024 * 5, size - 8)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function that takes an RSS feed data frame as its argument and returns a dataframe with the html removed from the description column, as best as possible given the state of some of the RSS pulls. (I'm looking at you, BuzzFeed) | def clean_desc(df, col=0):
# similar iteration to clean_title, but leveraging the prebuilt functions that
# can be run on BeautifulSoup objects to parse html text
for i, row in df.iterrows():
try:
clean = BeautifulSoup(df.iloc[i, col], 'html.parser').text
except TypeError:
... | [
"def clean_description(df):\n df.description = df.description.apply(lambda x: re.sub('<[^<]+?>', '', x))\n return df",
"def prepare_data(df):\n \n df = df[(~df.readme_contents.str.contains(\"<p \", na=False)) & (~df.readme_contents.str.contains(\"<div \", na=False))].dropna()\n df.loc[(df.language ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs the cleaning functions on the text columns of an RSS feed dataframe. A file name needs to be provided in order to save properly. Note that the final output is saved as a tsv (tab separated) file in order to avoid any unintended consequences with free text commas. | def clean_save(df, file_name, path=data_path):
# add a title name onto the path variable
new_path = os.path.join(data_path, file_name + '_clean.tsv')
# run each predefined function on a given data frame
df = clean_title(df)
df = clean_desc(df)
    # drop the unneeded, 'dirty' columns (using their ... | [
"def clean_text(df):\n\n df.replace(r\"\\*\", \"\", regex=True, inplace=True)\n df.replace(r\"\\n\", ' ', regex=True, inplace=True)\n df.replace(r\"\\r\", ' ', regex=True, inplace=True)\n\n # clean_string = trim_whitespace(df)\n # clean_string = lambda x: re.sub(r\"\\[1\\]\", \"\", x).strip() if isin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that we should not prune if there are no plays before cutoff | def test_prune_plays_skip_prune(app):
# setup
with app.app_context():
db = get_db()
# run
entities = {
"tracks": [
{"track_id": 3, "title": "track 3"},
],
"plays": [
# Current Plays
{"item_id": 3, "created_at": CURRENT_TIMESTAMP},
... | [
"def check_backtrack(self):\n differential = self.character.stats[4] - self.dungeonlevel\n if differential < 0:\n cutoff = float(3 - differential) / float(6 - 6 * differential)\n else:\n cutoff = float(3 + 5 * differential) / float(6 + 6 * differential)\n return ran... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns either a git commit ID or an X.Y.Z release number, plus an indicator of whether this is a release or not | def get_doc_version():
release_tag, release_commit = get_tag_info()
current_commit = get_commit_info()
if current_commit == release_commit:
return release_tag, 1
else:
return current_commit, 0 | [
"def retrieve_git_info():\n # Is Git installed?\n try:\n subprocess.call(['git', '--version'],\n stdout=subprocess.PIPE)\n except OSError:\n return None\n\n # Decide whether this is a release\n p = subprocess.Popen(\n ['git', 'describe', '--tags', '--candid... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sorts the given file. | def sort(file):
fileHandle = open(file, 'r')
lines = fileHandle.readlines()
fileHandle.close()
lines.sort()
fileHandle = open(file, 'w')
for line in lines:
fileHandle.write(line)
fileHandle.close() | [
"def sort_file(filename, sort, out=stdout):\n people = People()\n with open(filename, 'r') as f:\n for line in f:\n person = parse_record(line)\n people.add_person(person)\n if sort == 'last_name':\n people = people.sorted_by_last_name(reverse=True)\n elif sort == 'ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copies the range (in bytes) between fileStart and fileEnd. | def copySubRangeOfFile(inputFile, fileStart, fileEnd, outputFile):
fileHandle = open(inputFile, 'r')
fileHandle.seek(fileStart)
data = fileHandle.read(fileEnd - fileStart)
assert len(data) == fileEnd - fileStart
fileHandle.close()
fileHandle = open(outputFile, 'w')
fileHandle.write(data)
... | [
"def set(self, first, last):\n if first is not None and last is not None and first > last:\n raise ValueError(\"Byte range does not satisfy first <= last.\")\n elif first is None and last is None:\n raise ValueError(\"Byte range can not omit both first and last offsets.\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the point in the file to split. Returns an int i such that fileStart <= i < fileEnd | def getMidPoint(file, fileStart, fileEnd):
fileHandle = open(file, 'r')
midPoint = (fileStart + fileEnd) / 2
assert midPoint >= fileStart
fileHandle.seek(midPoint)
line = fileHandle.readline()
assert len(line) >= 1
if len(line) + midPoint < fileEnd:
return midPoint + len(line) -1
... | [
"def find_textfile_split_points(file: str, n: int) -> List[int]:\n filesize = os.path.getsize(file)\n file_chunk_size = int(math.floor(filesize / float(n)))\n offsets = [None] * n\n with open(file, mode=\"rb\") as f:\n offsets[0] = 0\n for idx in range(1, n):\n offset = idx * fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get paths to all RHINO files. Files will be in subjects_dir/subject/rhino. | def get_rhino_files(subjects_dir, subject):
# Base RHINO directory
rhino_dir = op.join(subjects_dir, subject, "rhino")
if " " in rhino_dir:
raise ValueError("subjects_dir/src_dir cannot contain spaces.")
# Surfaces files
surfaces_dir = op.join(rhino_dir, "surfaces")
os.makedirs(surface... | [
"def getEntitySourceFilePaths(self):\n tfilesraw = self.getCustom('Ontology', 'entity_sourcefiles', '')\n\n # Remove any empty terms file names.\n tfileslist = []\n for tfnameraw in tfilesraw.split(','):\n if tfnameraw.strip() != '':\n tfileslist.append(tfnamera... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in a nii.gz mask file name (which equals zero for background and != zero for the mask) and returns the mask as a 3 x npoints point cloud. | def niimask2indexpointcloud(nii_fname, volindex=None):
vol = nib.load(nii_fname).get_fdata()
if len(vol.shape) == 4 and volindex is not None:
vol = vol[:, :, :, volindex]
if not len(vol.shape) == 3:
Exception("nii_mask must be a 3D volume, or nii_mask must be a 4D volume with volindex spe... | [
"def zscoringNII(filename,sourcedir='../../brainimages'):\n\n # Read images\n reader = ImageReader.ImageReader(sourcedir)\n img = reader.get_raw_image(filename)\n data = img.get_data()\n affine = img.get_affine()\n \n # Compute non-zero mean\n nonzdata = data[np.nonzero(data)]\n meanz = n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in a nii.gz mask (which equals zero for background and != zero for the mask) and returns the mask as a 3 x npoints point cloud in native space in mm. | def niimask2mmpointcloud(nii_mask, volindex=None):
vol = nib.load(nii_mask).get_fdata()
if len(vol.shape) == 4 and volindex is not None:
vol = vol[:, :, :, volindex]
if not len(vol.shape) == 3:
Exception("nii_mask must be a 3D volume, or nii_mask must be a 4D volume with volindex specifyi... | [
"def niimask2indexpointcloud(nii_fname, volindex=None):\n\n vol = nib.load(nii_fname).get_fdata()\n\n if len(vol.shape) == 4 and volindex is not None:\n vol = vol[:, :, :, volindex]\n\n if not len(vol.shape) == 3:\n Exception(\"nii_mask must be a 3D volume, or nii_mask must be a 4D volume wit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find nearest node in nodes to the passed in node. Returns | def _closest_node(node, nodes):
if len(nodes) == 1:
nodes = np.reshape(nodes, [-1, 1])
kdtree = KDTree(nodes)
distance, index = kdtree.query(node)
return index, distance | [
"def near_nodes(self, node):\n nnode = self.tree.len + 1\n r = ceil(5.5*np.log(nnode))\n return self.tree.k_nearest(node,r)",
"def nearest_node(point, nodes,sindex): \n return nearest(point, nodes,sindex)",
"def get_nearest_node(self, point, return_dist=False):\n return ox.get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read volume info from an MRI file. | def _get_vol_info_from_nii(mri):
dims = nib.load(mri).get_fdata().shape
out = dict(mri_width=dims[0], mri_height=dims[1], mri_depth=dims[2], mri_volume_name=mri)
return out | [
"def read_volume( mdserver_name, fields ):\n global conf\n\n ctl_root = VOLUME_CTL_ROOT( conf, {'NAME': mdserver_name} )\n conf_path = VOLUME_CONF_PATH( ctl_root )\n\n try:\n vol_conf = read_config( conf_path, fields )\n except Exception, e:\n raise MDMethodFailed( \"read_volume\", \"could not r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
sform allows mapping from simple voxel index coordinates (e.g. from 0 to 256) in scanner space to continuous coordinates (in mm). sformcode = os.popen('fslorient getsformcode {}'.format(nii_file)).read().strip() | def _get_sform(nii_file):
sformcode = int(nib.load(nii_file).header["sform_code"])
if sformcode == 1 or sformcode == 4:
sform = nib.load(nii_file).header.get_sform()
else:
raise ValueError("sform code for {} is {}, and needs to be 4 or 1".format(nii_file, sformcode))
sform = Transform... | [
"def _get_mni_sform(nii_file):\n\n sformcode = int(nib.load(nii_file).header[\"sform_code\"])\n\n if sformcode == 1 or sformcode == 4:\n sform = nib.load(nii_file).header.get_sform()\n else:\n raise ValueError(\"sform code for {} is {}, and needs to be 4 or 1\".format(nii_file, sformcode))\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
sform allows mapping from simple voxel index coordinates (e.g. from 0 to 256) in scanner space to continuous coordinates (in mm). sformcode = os.popen('fslorient getsformcode {}'.format(nii_file)).read().strip() | def _get_mni_sform(nii_file):
sformcode = int(nib.load(nii_file).header["sform_code"])
if sformcode == 1 or sformcode == 4:
sform = nib.load(nii_file).header.get_sform()
else:
raise ValueError("sform code for {} is {}, and needs to be 4 or 1".format(nii_file, sformcode))
sform = Trans... | [
"def _get_sform(nii_file):\n\n sformcode = int(nib.load(nii_file).header[\"sform_code\"])\n\n if sformcode == 1 or sformcode == 4:\n sform = nib.load(nii_file).header.get_sform()\n else:\n raise ValueError(\"sform code for {} is {}, and needs to be 4 or 1\".format(nii_file, sformcode))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set a pixel to 1 if a required majority (default=14) or more pixels in its 3x3x3 neighborhood are 1; otherwise, set the pixel to 0. img is a 3D binary image | def _binary_majority3d(img):
if img.dtype != "bool":
raise ValueError("binary_majority3d(img) requires img to be binary")
if len(img.shape) != 3:
raise ValueError("binary_majority3d(img) requires img to be 3D")
imgout = generic_filter(img, LowLevelCallable(majority.ctypes), size=3).astype... | [
"def mask_img(img,val):\r\n img = deepcopy(img)\r\n data = img.get_data()\r\n data[:] = np.round(data)\r\n data[data!=val] = 0\r\n data[data==val] = 1\r\n return img",
"def majority(bimage, th = 5, mod = False):\n rows, cols = bimage.shape\n out = np.zeros((rows,cols), np.bool)\n for ro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions. | def best_fit_transform(A, B):
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, B... | [
"def matrix_solve_general_least_squares(a, b):\n G = np.linalg.lstsq(b.T, a.T)[0].T\n # TODO optionally include error metrics\n return G",
"def mmc_optimize(B, labels, manifold_map_fn, integrand, solver='Powell', max_eval=50, summary=False):\n dim = len(B[0])\n L_init = np.eye(dim)\n manifold_op... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the nearest (Euclidean) neighbor in dst for each point in src. | def nearest_neighbor(src, dst):
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel() | [
"def nearest_neighbor(src, dst):\n\n neigh = NearestNeighbors(n_neighbors=1)\n neigh.fit(dst)\n distances, indices = neigh.kneighbors(src, return_distance=True)\n return distances.ravel(), indices.ravel()",
"def find_nearest_neighbor(src, dst):\r\n tree = sp.spatial.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |