query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: list, lengths 19 to 20
metadata: dict
Gets coordinates from a MyCityRequestDataModel and converts them to the dictionary required by GIS utilities
def convert_mycity_coordinates_to_arcgis(mycity_request) -> dict: gis_coordinates = { 'x': 0, 'y': 0 } if mycity_request.geolocation_coordinates: gis_coordinates['y'] = mycity_request.geolocation_coordinates["latitudeInDegrees"] gis_coordinates['x'] = mycity_request.geol...
[ "def get_lat(city: Record) -> np.ndarray:\n return city.geometry.y", "def get_x_and_y_as_dict(self):\n data = self.read_data()\n x = data[\"x\"].tolist()\n y = data[\"y\"].tolist()\n return {\"x\": x, \"y\": y}", "def get_city_coordinates(location):\r\n\r\n url = \"http...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the provided coordinates are in any of the cities provided
def are_coordinates_in_city(coordinates, cities): if 'latitudeInDegrees' in coordinates: coordinates['y'] = coordinates["latitudeInDegrees"] coordinates['x'] = coordinates["longitudeInDegrees"] lat = coordinates['y'] long = coordinates['x'] location = gis_utils.reverse_geocode_addr([lo...
[ "def isCoordValid(coordinate):\n return coordinate in allCoords", "def is_location_in_city(address, coordinates):\n if address:\n return is_address_in_city(address)\n if coordinates:\n return are_coordinates_in_city(coordinates, gis_utils.NEIGHBORHOODS)\n\n return True", "def verify_loc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the provided address is in Boston
def is_address_in_city(address): # If we don't have any detail about city or zipcode # we default to Boston for the geocode search parsed_address, _ = usaddress.tag(address) if "PlaceName" not in parsed_address and "ZipCode" not in parsed_address: address = " ".join([address, "Boston"]) ci...
[ "def is_hometown(town_name):\n hometown = \"San Francisco\"\n if town_name == hometown:\n return True\n else:\n return False", "def test_check_coordinate_within_county(self):\n geo_locator = Nominatim(user_agent=USER_AGENT)\n\n location = geo_locator.geocode(\"Berkeley\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if the provided address or coordinates are located in Boston. If both are provided, address takes priority
def is_location_in_city(address, coordinates):
    if address:
        return is_address_in_city(address)
    if coordinates:
        return are_coordinates_in_city(coordinates, gis_utils.NEIGHBORHOODS)
    return True
[ "def is_address_in_city(address):\n\n # If we don't have any detail about city or zipcode\n # we default to Boston for the geocode search\n parsed_address, _ = usaddress.tag(address)\n if \"PlaceName\" not in parsed_address and \"ZipCode\" not in parsed_address:\n address = \" \".join([address, \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create or read from a score file for the Othello game. After adding in the name and score for the current round, updates the file accordingly.
def update_score(self): try: with open(self.filename, "r+") as f: contents = f.readlines() except: with open(self.filename, "w") as fn: fn.write("" + self.name + " " + str(self.score) + "\n") return # Store scores in dictio...
[ "def readScores(self):\n f = open(\"./resources/bestScores_\" + str(self.parent.difficulty) + \".txt\", \"r\")\n self.names = [\"\",\"\",\"\",\"\",\"\"]\n self.values = [0,0,0,0,0]\n i=0\n for line in f:\n words = line.split(\",\")\n self.names[i] = words[0]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of strings (contents), store each string within a list of tuples (score_list). Returns a list of tuples.
def add_scores(self, score_list, contents):
    for line in contents:
        separator = line.rfind(' ')
        name, score = line[: separator], line[separator + 1:]
        score_list.append((name, int(score)))
    return score_list
[ "def _get_strings_scores(self, str_scores):\n strings_scores = []\n for str_score in str_scores:\n score = sum(str_score) / len(str_score)\n strings_scores.append(score)\n return strings_scores", "def match_score_with_name(names: List[str], name_score: List[Tuple[str, in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwind an array, matching each element using expression e
>>> e = unw(var('x'))
>>> e([1, 2])
def unw(e):
    def match(x):
        if isinstance(x, list):
            return [z for y in x for z in e(y)]
        else:
            return []
    return match
[ "def unify_var(var,x,s):\n if (type(var) == list):\n var = tuple(var)\n elif (type(x) == list):\n x = tuple(x)\n if var in s:\n return unify(s[var],x,s)\n elif x in s:\n return unify(var,s[x],s)\n elif occur_check(var,x,s):\n return None\n else:\n return a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwind an array, matching each element using expression e
>>> e = unp(var('k'), var('v'))
def unp(ke, ve):
    def match(x):
        if isinstance(x, dict):
            return [
                sat.cmb(kz, vz)
                for ky, vy in x.items()
                for kz in ke(ky)
                for vz in ve(vy)
                if sat.cmp(kz, vz)
            ]
        else:
            return []
    return match
[ "def devectorize(self, v):\n s = \"\"\n for ident in v:\n s += self.inverse_mapping[ident]\n return s", "def unw(e):\n def match(x):\n if isinstance(x, list):\n return [z for y in x for z in e(y)]\n else:\n return []\n\n return match", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load a large PAC file that triggers Js2Py hitting the Python recursion limit. Ensure it raises a more useful message. Note that unrecoverable stack overflows are possible, where this message won't be raised.
def test_large_pac_handling(self):
    pac_js = 'function FindProxyForURL(url, host) { if(%s) { return "DIRECT"; } }' % \
        ' || '.join(200 * ['shExpMatch(host, "*.example.com")'])
    with pytest.raises(PacComplexityError):
        PACFile(pac_js, recursion_limit=1000)
[ "def test_TooLarge(self):\n uboot_fname = self.MakeRandomFile(900 * 1024)\n self.bundle.SetFiles('robin_hood', bct=self.bct_fname,\n uboot=uboot_fname, bmpblk=self.bmpblk_fname)\n self.bundle.SelectFdt('dts/tegra-map.dts')\n image = os.path.join(self.tmpdir, 'image.bin')\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filters the elements that contain text (as opposed to images, etc) and extracts the textlines into the constituting textblocks
def extract_text_elements(page):
    page_elements = get_page_elements(page)
    istext = lambda el: isinstance(el, LTTextBox) or isinstance(el, LTTextLine)
    text = filter(istext, page_elements)
    return [tb for boxes in text for tb in boxes]
[ "def filter_unrelated_lines(text_blocks, _config):\n new_text_blocks=[]\n for tb in text_blocks:\n new_tb=copy.copy(tb)\n new_tb['blockText']=[]\n next_top=tb['blockTop']\n for t in tb['blockText']:\n if t['top'] < next_top + t['height'] / 2:\n next_top=t[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an id column. This method creates an unbounded text field for platforms that support it.
def idcolumn(): return String(512).with_variant(Text(), "sqlite", "postgresql")
[ "def mk_id(field):\n name = field.type_id\n if name == \"string\":\n return \"%s\" % (\"char\")\n elif name == \"array\" and field.size:\n if field.options['fill'].value not in CONSTRUCT_CODE:\n return \"%s\" % convert(field.options['fill'].value)\n else:\n return \"%s\" % field.options['fill'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove unwanted characters from the given string and return a copy All nonletter and nonnumeral characters are considered unwanted except for underscore ('_'), see UNWANTED_CHARS.
def remove_unwanted_chars(inputstring): return inputstring.translate(ASCII_CHARS, UNWANTED_CHARS)
[ "def strip_nonalpha(word: str)->str:\n return re.sub('[^a-zA-Z]+', '', word)", "def clean_str(string):\n string = re.sub(r\"\\s\", \"_\", string.strip())\n return re.sub(r\"[^\\w]\", \"\", string)", "def strip_non_alnum(string):\n return _re_non_alphanum.sub(u\" \", string)", "def strip_chars(stri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import all available modules in the specified namespace.
def import_available_modules(namespace): modules = [] for path in sys.path: cand_modpath_glob = os.path.sep.join([path] + namespace.split('.') + ['*.py']) # if sys.path entry being considered is the empty string # (which corresponds to Python packages/modules in current working directo...
[ "def import_star(modules:[str], ns:dict=None):\n global_imports([f\"from {m} import *\" for m in modules], ns)", "def load_all(self):\n new_modules = []\n module_names = sys.modules.keys()\n module_names.sort()\n for module_name in module_names:\n module = sys.modules[mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorated function, raises ImportError if specified module is not available.
def wrap(orig): try: imported = None for modname in modnames: try: __import__(modname) imported = modname break except ImportError: pass if imported is None: ...
[ "def test_import_function_missing_module():\n with pytest.raises(ModuleNotFoundError):\n some_function = import_function(\"should_not_exist\", \"some_function\")\n some_function()", "def _on_import_factory(module, raise_errors=True):\n def on_import(hook):\n # Import and patch module\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input is a BxNx3 batch of point clouds. Output is Bx(vsize^3)
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True): vol_list = [] for b in range(point_clouds.shape[0]): vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius) if flatten: vol_list.append(vol.flatten()) else: vol...
[ "def raw2vpvol_nc_batch():", "def preprocess(name, path_output=False, voxel_size=0.5, labels=True):\n # path_output should not have an extension\n # name need an extension\n plydata = PlyData.read(name)\n pcd = o3d.io.read_point_cloud(name)\n if labels:\n labels = np.asarray(plydata.elements...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
input is Nx3 points. output is vsize*vsize*vsize. assumes points are in range [-radius, radius]
def point_cloud_to_volume(points, vsize, radius=1.0):
    vol = np.zeros((vsize, vsize, vsize))
    voxel = 2*radius/float(vsize)
    locations = (points + radius)/voxel
    locations = locations.astype(int)
    vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
    return vol
[ "def size(self) -> Vec:\n return Vec(\n self.max_x - self.min_x,\n self.max_y - self.min_y,\n self.max_z - self.min_z,\n )", "def getSize(self, *args) -> \"SbVec3d\":\n return _coin.SbBox3d_getSize(self, *args)", "def vectorLength(v):\n return math.sqrt(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input is BxNx3, a batch of point clouds. Output is B x V x V x V x num_sample x 3. Added on Feb 19
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128): vol_list = [] for b in range(point_clouds.shape[0]): vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample) vol_list.append(np.expand_dims(vol, 0)) return np.concatenate(vol_list, ...
[ "def raw2vpvol_nc_batch():", "def multi_cloudgen(num_point, num_feat, num_class, edge_length):\n assert(num_feat >= num_class)\n centers = zeros((num_feat, num_class))\n centers[:num_class, :num_class] = (edge_length/sqrt(2))*eye(num_class)\n midpoint = mean(centers, axis=1)\n midpoint.shape = (num...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
input is Nx3 points. output is vsize*vsize*vsize*num_sample*3. assumes points are in range [-radius, radius]. samples num_sample points in each voxel; if there are fewer than num_sample points, replicates the points. Added on Feb 19
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128): vol = np.zeros((vsize,vsize,vsize,num_sample,3)) voxel = 2*radius/float(vsize) locations = (points + radius)/voxel locations = locations.astype(int) loc2pc = {} for n in range(points.shape[0]): loc = tuple(locations...
[ "def spherical_voxel_optimized(points: np.ndarray, size_bandwidth: int, size_radial_divisions: int,\n radius_support: float, do_random_sampling: bool, num_random_points: int) \\\n -> Tuple[np.ndarray, np.ndarray]:\n if do_random_sampling:\n min_limit = 1 if points.shape...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
input is Nx3 points. output is imgsize*imgsize*num_sample*3. assumes points are in range [-radius, radius]. samples num_sample points in each pixel; if there are fewer than num_sample points, replicates the points. Added on Feb 19
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128): img = np.zeros((imgsize, imgsize, num_sample, 3)) pixel = 2*radius/float(imgsize) locations = (points[:,0:2] + radius)/pixel # Nx2 locations = locations.astype(int) loc2pc = {} for n in range(points.shape[0]): loc = t...
[ "def sample_image_patch(data_size, patch_size, n_samples):\n height, width = patch_size\n xs = np.random.randint(0, data_size[2] - width, n_samples)\n ys = np.random.randint(0, data_size[1] - height, n_samples)\n return np.dstack((xs, ys)).reshape((n_samples, 2))", "def generate_3circles_data_set(Npts...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read XYZ point cloud from filename PLY file
def read_ply(filename):
    plydata = PlyData.read(filename)
    pc = plydata['vertex'].data
    pc_array = np.array([[x, y, z] for x, y, z in pc])
    return pc_array
[ "def load_from_ply(filepath: Path) -> np.ndarray:\n points = ply.read_ply(str(filepath))['points'].to_numpy()\n return points", "def load_xyz(self, filename):\n xyz = torch.Tensor(np.load(filename)).float()[:, :, :3]\n xyz = xyz.permute(2, 0, 1)\n return xyz", "def extract_point_coord...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file
def write_ply_color(points, labels, filename, num_classes=None, colormap=pyplot.cm.jet): labels = labels.astype(int) N = points.shape[0] if num_classes is None: num_classes = np.max(labels)+1 else: assert(num_classes>np.max(labels)) vertex = [] #colors = [pyplot.cm.jet(i/flo...
[ "def save_color_labels(gt_data, binarize, gt_filename, output_filename, slice_axis):\n n_class, h, w, d = gt_data.shape\n labels = range(n_class)\n # Generate color labels\n multi_labeled_pred = np.zeros((h, w, d, 3))\n if binarize:\n gt_data = imed_postpro.threshold_predictions(gt_data)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file
def write_ply_rgb(points, colors, out_filename, num_classes=None):
    colors = colors.astype(int)
    N = points.shape[0]
    fout = open(out_filename, 'w')
    for i in range(N):
        c = colors[i, :]
        fout.write('v %f %f %f %d %d %d\n' % (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
    fout.close()
[ "def write_obj(file, patches, res=30):\n linspace = th.linspace(0, 1, res).to(patches)\n s_grid, t_grid = th.meshgrid(linspace, linspace)\n verts = coons_sample(s_grid.flatten(),\n t_grid.flatten(), patches).cpu().numpy()\n n_verts = verts.shape[1]\n with open(file, 'w') as f:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
vol is of size vsize*vsize*vsize. output an image to output_filename
def pyplot_draw_volume(vol, output_filename):
    points = volume_to_point_cloud(vol)
    pyplot_draw_point_cloud(points, output_filename)
[ "def generate_brick_volfile(volfile_path, volname):\n data = {}\n with open(os.path.join(VOLINFO_DIR, \"%s.info\" % volname)) as info_file:\n data = json.load(info_file)\n\n content = \"\"\n template_file = os.path.join(\n TEMPLATES_DIR,\n \"%s.brick%s.vol.j2\" % (data[\"type\"], os...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export scene bbox to meshes
def write_bbox(scene_bbox, out_filename): def convert_box_to_trimesh_fmt(box): ctr = box[:3] lengths = box[3:] trns = np.eye(4) trns[0:3, 3] = ctr trns[3,3] = 1.0 box_trimesh_fmt = trimesh.creation.box(lengths, trns) return box_trimesh_fmt sce...
[ "def _export_scene_to_fbx(outpath, selected_objects_only = True):\r\n\r\n bpy.ops.export_scene.fbx(\r\n filepath=outpath,\r\n check_existing=False,\r\n axis_forward='-Z',\r\n axis_up='Y',\r\n version='BIN7400',\r\n use_selection=selected_objects_only,\r\n global_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export oriented (around Z axis) scene bbox to meshes
def write_oriented_bbox(scene_bbox, out_filename): def heading2rotmat(heading_angle): pass rotmat = np.zeros((3,3)) rotmat[2,2] = 1 cosval = np.cos(heading_angle) sinval = np.sin(heading_angle) rotmat[0:2,0:2] = np.array([[cosval, -sinval],[sinval, cosval]]) r...
[ "def writeyz(edges, bounds, filename, scale, space):\n\n #start = time.clock()\n file = open(filename, 'wb')\n inkscapeheader(file)\n figdata(file, edges, 'yz', bounds, scale, space)\n inkscapefooter(file)\n file.close()\n #end = time.clock()\n #seconds = \" in %.2f %s\" % (end-start, \"seco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export oriented (around Y axis) scene bbox to meshes
def write_oriented_bbox_camera_coord(scene_bbox, out_filename): def heading2rotmat(heading_angle): pass rotmat = np.zeros((3,3)) rotmat[1,1] = 1 cosval = np.cos(heading_angle) sinval = np.sin(heading_angle) rotmat[0,:] = np.array([cosval, 0, sinval]) rotmat[2,...
[ "def writeyz(edges, bounds, filename, scale, space):\n\n #start = time.clock()\n file = open(filename, 'wb')\n inkscapeheader(file)\n figdata(file, edges, 'yz', bounds, scale, space)\n inkscapefooter(file)\n file.close()\n #end = time.clock()\n #seconds = \" in %.2f %s\" % (end-start, \"seco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that BlobDetector raises a ValueError if a user tries to use blob_doh on 3d data. `skimage.blob_doh` only supports 2d data.
def test_blob_doh_error_handling(): stack = ImageStack.from_numpy(np.zeros((4, 2, 10, 100, 100), dtype=np.float32)) blob_doh = BlobDetector( min_sigma=1, max_sigma=4, num_sigma=5, threshold=0, detector_method='blob_doh', measurement_type='max', is_volume=...
[ "def blob_detection(post_masked_processed, draw):\n img = cv2.normalize(post_masked_processed, None, 0, 255, cv2.NORM_MINMAX)\n img = img.astype('uint8')\n img = cv2.medianBlur(img, 7)\n\n th2 = filters.threshold_sauvola(img)\n th2 = 255-th2\n th2 = th2.astype(\"uint8\")\n # Set our filtering p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that we can run multiple traces on the same tracer.
def testMultipleTraces(self): tracer = self._tracer_factory.Produce() tracer.BeginTracing() model1 = tracer.EndTracing() tracer.BeginTracing() model2 = tracer.EndTracing() self.assertEqual(1, len(model1.FindAllThreadsNamed('CrBrowserMain'))) self.assertEqual(1, len(model2.FindAllThreadsNamed...
[ "def testMultipleTracers(self):\n tracer1 = self._tracer_factory.Produce()\n tracer2 = self._tracer_factory.Produce()\n # Nested calls to beginTracing is untested and probably won't work.\n tracer1.BeginTracing()\n model1 = tracer1.EndTracing()\n tracer2.BeginTracing()\n model2 = tracer2.EndTra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that we can run multiple traces with multiple tracers.
def testMultipleTracers(self): tracer1 = self._tracer_factory.Produce() tracer2 = self._tracer_factory.Produce() # Nested calls to beginTracing is untested and probably won't work. tracer1.BeginTracing() model1 = tracer1.EndTracing() tracer2.BeginTracing() model2 = tracer2.EndTracing() s...
[ "def testMultipleTraces(self):\n tracer = self._tracer_factory.Produce()\n tracer.BeginTracing()\n model1 = tracer.EndTracing()\n tracer.BeginTracing()\n model2 = tracer.EndTracing()\n self.assertEqual(1, len(model1.FindAllThreadsNamed('CrBrowserMain')))\n self.assertEqual(1, len(model2.FindAll...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns default installation directory for Osagent. It should return
def get_default_installation_dir(): if sys.platform == "win32": install_path = os.path.expandvars(r'%PROGRAMW6432%\dynatrace\oneagent') conf_path = os.path.expandvars(r'%programdata%\dynatrace\oneagent\agent\config\ruxitagentproc.conf') else: install_path = '/opt/dynatrace/oneagent' ...
[ "def get_default_config_persistence_dir():\n if sys.platform == \"win32\":\n ret = os.path.join(os.environ[\"programdata\"], \"dynatrace\", \"oneagent\", \"agent\", \"config\")\n else:\n ret = os.path.join(os.path.sep, \"var\",\"lib\",\"dynatrace\",\"oneagent\",\"agent\",\"config\")\n logging...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns default config persistence for Osagent where all files created by agent will be stored. It
def get_default_config_persistence_dir(): if sys.platform == "win32": ret = os.path.join(os.environ["programdata"], "dynatrace", "oneagent", "agent", "config") else: ret = os.path.join(os.path.sep, "var","lib","dynatrace","oneagent","agent","config") logging.debug("Setting persistence config...
[ "def loadDefaultConfPath():\n global GlobalConfig\n GlobalConfig['path_list']={}\n if MYOS == 'Windows':\n GlobalConfig['path_list']['cache_dir']=os.environ['USERPROFILE'].decode(SYSENC)+u\"\\\\litebook\\\\cache\"\n GlobalConfig['path_list']['bookdb']=os.environ['APPDATA'].decode(SYSENC)+u\"\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the list of groups for an authorization role
def get(isamAppliance, name, check_mode=False, force=False): return isamAppliance.invoke_get("Retrieving the list of groups for an authorization roles", "/authorization/roles/{0}/groups/v1".format(name))
[ "def get_groups():\n \n # Retrieve the admin object\n admin = get_user(get_jwt_identity())\n groups_data = admin.groups\n\n return jsonify(groups_schema.dump(groups_data))", "def get_roles(self):\n\n # Find all groups with 'ca-role' in the role_attribute\n user_filter = \"(&(objectCla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a group to a management authorization role
def set(isamAppliance, name, group_name, type='embedded_ldap', check_mode=False, force=False): new_group = True ret_obj = ibmsecurity.isam.base.management_authorization.role.get(isamAppliance, name) if (ret_obj['data']['groups'] == None): ret_obj['data']['groups'] = [] else: for grp in ...
[ "def test_add_role_to_ldap_group(self):\n pass", "def save(self, *args, **kwargs):\n\t\tadmin_group = Group.objects.get_or_create(name='administrator')[0]\n\t\tself.user.groups.add(admin_group)\t\t\n\t\treturn super(AdminUser, self).save(*args, **kwargs)", "def add_gp_role(request, role, group, domain=No...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a group from a management authorization role
def delete(isamAppliance, name, group_name, check_mode=False, force=False): group_found = False ret_obj = ibmsecurity.isam.base.management_authorization.role.get(isamAppliance, name) if (ret_obj['data']['groups'] != None): for grp in ret_obj['data']['groups']: if grp['name'] == group_na...
[ "def test_delete_role_from_ldap_group(self):\n pass", "def Delete(iam,groupname: str):\n\t\t\t\treturn iam.resource.Group(groupname).delete()", "def test_destroy_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[0]\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for the first node containing data that matches the KEY. Returns the node or None if not found.
def search(self, key):
    current = self.head
    while current:
        if current.data == key:
            return current
        else:
            current = current.next_node
    return None
[ "def find(self, key):\n node=self.head\n while node:\n if node.data==key:\n return node\n if node.next:\n node=node.next\n return None", "def search(self, key):\n # Find a node with the given item, if any\n node = self._find_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts a new node containing data at index position. Insertion takes O(1) time, but finding the node at the insertion point takes O(n) time. Takes overall O(n) time.
def insert(self, data, index): if index == 0: self.add(data) if index > 0: new = Node(data) position = index current = self.head while position > 1: current = current.next_node position -= 1 prev =...
[ "def insert(self, idx, data):\n if idx == 0:\n self.push(data)\n else:\n i = 0\n node = self.head\n while(i+1 < idx and node.next):\n i += 1\n node = node.next\n self.insert_after(node, data)", "def insert_at_positi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes the node containing the data that matches the KEY. Returns the Node or None if it doesn't exist. Takes O(n) time.
def remove(self, key): current = self.head prev = None found = False while current and not found: if current.data == key and current is self.head: found = True self.head = current.next_node elif current.data == key: ...
[ "def remove(self, key):\n node, parent = self._find_node_and_parent(key)\n\n value = node.value\n\n self._remove_node(node, parent)\n\n return value", "def remove(self, key):\r\n if self.contains(key) is False:\r\n raise KeyError\r\n hash_idx = hash_string(key,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform the audio: just scale the volume of the audio
def get_audio_transform(self, audio):
    scale = random.random() + 0.5  # 0.5-1.5
    audio = scale * audio
    return audio
[ "def normalize_volume_to(self, sample, volume):\n return volume * sample / self.calc_volume(sample)", "def transform_audio(self, audio_sample, output_path):\n self.shoebox.add_source(self.source_position, signal=audio_sample)\n self.shoebox.add_microphone_array(self.mic)\n\n self.shoeb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the contents of the Index to disk.
def write(self):
    err = C.git_index_write(self._index)
    check_error(err, True)
[ "def save_index():\n es.index(\n index=JDBC_INDEX,\n doc_type=JDBC_DOC_TYPE,\n id=JDBC_ID,\n body=META_DEFAULT\n )", "def write_index(idx, track=sys.stdout):\n filename = _index_filename(track)\n\n if filename is None:\n return\n\n if CACHE_INDEX:\n _cache[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a tree out of the Index. Return the object of the written tree. The contents of the index will be written out to the object database. If there is no associated repository, 'repo' must be passed. If there is an associated repository and 'repo' is passed, then that repository will be used instead. It returns the i...
def write_tree(self, repo=None):
    coid = ffi.new('git_oid *')
    if repo:
        err = C.git_index_write_tree_to(coid, self._index, repo._repo)
    else:
        err = C.git_index_write_tree(coid, self._index)
    check_error(err)
    return Oid(raw=bytes(ffi.buffer(coid)[:]))
[ "def index(self):\n cindex = ffi.new('git_index **')\n err = C.git_repository_index(cindex, self._repo)\n check_error(err, True)\n\n return Index.from_c(self, cindex)", "def repository(repo):\n tags = registry.get_tags(repo)\n images = registry.get_images(repo)\n\n # swap key ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add or update index entries matching files in the working directory. If pathspecs are specified, only files matching those pathspecs will be added.
def add_all(self, pathspecs=[]):
    with StrArray(pathspecs) as arr:
        err = C.git_index_add_all(self._index, arr, 0, ffi.NULL, ffi.NULL)
        check_error(err, True)
[ "def add_file(self, file_path):\n self._repo.index.add([str(file_path)])", "def update(self, *args, **kwargs):\n\n if args or kwargs:\n files = self.search_files(*args, **kwargs)\n else:\n files = self.files\n\n for db_file in files:\n db_file.update()"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add or update an entry in the Index. If a path is given, that file will be added. The path must be relative to the root of the worktree and the Index must be associated with a repository. If an IndexEntry is given, that entry will be added or updated in the Index without checking for the existence of the path or id.
def add(self, path_or_entry): if is_string(path_or_entry): path = path_or_entry err = C.git_index_add_bypath(self._index, to_bytes(path)) elif isinstance(path_or_entry, IndexEntry): entry = path_or_entry centry, str_ref = entry._to_c() err = C...
[ "def add_file(self, file_path):\n self._repo.index.add([str(file_path)])", "def add_or_update_entry(self, entry, id_field=\"domain_name\"):\n # make sure the id_field is a valid index field\n if id_field not in self.index_keys:\n logger.error(\n \"Invalid ID field in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Diff the index against the working directory. Return an object with the differences between the index and the working copy.
def diff_to_workdir(self, flags=0, context_lines=3, interhunk_lines=0): repo = self._repo if repo is None: raise ValueError('diff needs an associated repository') copts = ffi.new('git_diff_options *') err = C.git_diff_init_options(copts, 1) check_error(err) ...
[ "def index(self):\n cindex = ffi.new('git_index **')\n err = C.git_repository_index(cindex, self._repo)\n check_error(err, True)\n\n return Index.from_c(self, cindex)", "def _dir_index(self) -> Path:\n if self.dir_index is not None:\n return self.dir_index\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Diff the index against a tree. Return an object with the differences between the index and the given tree.
def diff_to_tree(self, tree, flags=0, context_lines=3, interhunk_lines=0): repo = self._repo if repo is None: raise ValueError('diff needs an associated repository') if not isinstance(tree, Tree): raise TypeError('tree must be a Tree') copts = ffi.new('git_diff_...
[ "def get_tree_changes(repo):\n with open_repo_closing(repo) as r:\n index = r.open_index()\n\n # Compares the Index to the HEAD & determines changes\n # Iterate through the changes and report add/delete/modify\n # TODO: call out to dulwich.diff_tree somehow.\n tracked_changes =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A collection of conflict information. If there are no conflicts, None is returned. Otherwise return an object that represents the conflicts in the index. This object presents a mapping interface with the paths as keys. You can use the ``del`` operator to remove a conflict from the Index. Each conflict is made up of three...
def conflicts(self):
    if not C.git_index_has_conflicts(self._index):
        self._conflicts = None
        return None
    if self._conflicts is None:
        self._conflicts = ConflictCollection(self)
    return self._conflicts
[ "def file_conflicts(self):\n # this type is defined in DB-manager\n #File_Conflict_Map = namedtuple(\"File_Conflict_Map\",\n # \"by_file by_mod\")\n\n return self._file_conflicts", "def assert_no_index_conflict(self) -> None:\n matching_keys = set(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The id of the referenced object as a hex string
def hex(self): return self.id.hex
[ "def serialized_id(self) -> str:", "def id(self):\n return id(self._getobj_())", "def uuid(self):\n return self.raw.Id", "def unique_id(self):\n return f\"{self.entity_id}\"", "def _get_id(self) -> \"std::string\" :\n return _core.Property__get_id(self)", "def public_id(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert this entry into the C structure. The first returned arg is the pointer; the second is the reference to the string we allocated, which we need to exist past this function
def _to_c(self):
    centry = ffi.new('git_index_entry *')
    # basically memcpy()
    ffi.buffer(ffi.addressof(centry, 'id'))[:] = self.id.raw[:]
    centry.mode = self.mode
    path = ffi.new('char[]', to_bytes(self.path))
    centry.path = path
    return centry, path
[ "def qstrvec_t_get_clink_ptr(*args) -> \"PyObject *\":\n return _ida_pro.qstrvec_t_get_clink_ptr(*args)", "def ScXMLStringDataObj_createFor(value: 'char const *') -> \"ScXMLDataObj *\":\n return _coin.ScXMLStringDataObj_createFor(value)", "def ScXMLReferenceDataObj_createFor(reference: 'char const *') -> ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train stage 1: train the parent network.
def train_parent(args, cfg): data_path = args.data_path lr = cfg.tp_lr epoch_size = cfg.tp_epoch_size batch_size = cfg.tp_batch_size rank_id = 0 ckpt_dir = cfg.dirResult + '/parent' if not os.path.exists(ckpt_dir): os.makedirs(ckpt_dir, exist_ok=True) if args.run_distribute: ...
[ "def train(self):\n self.train_state = True", "def train(self) -> None:\r\n\r\n self.training = True", "def do_training():\n train_cls = Train()\n train_cls.run()", "def set_train(self):\n # global setting in dygraph\n # NOTE(chenweihang): nn.Layer also can be used in static mode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train stage 2: train the online network.
def train_online(args, cfg): data_path = args.data_path seq_name = args.seq_name seq_name_list = { 'blackswan': 1e-4, 'goat': 1e-4, 'car-shadow': 5e-6, 'cows': 5e-5, 'car-roundabout': 1e-5, 'paragliding-launch': 1e-4, 'horsejump-high': 1e-4, 'd...
[ "def do_training():\n train_cls = Train()\n train_cls.run()", "def train(self) -> None:\r\n\r\n self.training = True", "def train_model(self):\n retrieved_planner_type, retrieved_path, final_planner_type, final_path, num_paths, num_NN_paths = self.retrieved_and_final_path\n # record stats...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise an HTTPError if the response is 5XX.
def raise_on_unexpected(http_response):
    if 500 <= http_response.status_code <= 599:
        raise make_http_exception(response=http_response)
[ "def raise_response_errors(response):\n # check HTTPError\n response.raise_for_status()\n # some sites can return 200 and write error in body\n if 'error' in response.json():\n raise requests.exceptions.HTTPError(response.json()['error'])", "def error_505(error):\n\n try:\n error_desc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the conn IMAPClient object with input from the GUI
def connect(self, server): ssl = False # TODO: Un-hardcode, give option in gui print "Creating connection..." try: self.conn = IMAPClient(server, ssl=ssl) print "Connection success\n" except: raise ConnectionManagerException("Connection Failed") ...
[ "def __init__(self, ui, port=-1, parent=None):\n super(ChatWidget, self).__init__(parent)\n self.setupUi(self)\n \n self.shareButton.setIcon(\n UI.PixmapCache.getIcon(\"sharedEditDisconnected.png\"))\n self.startEditButton.setIcon(\n UI.PixmapCache.getIcon(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the form variable based on the file name suffix
def set_form(self, fileobj): extension = os.path.splitext(fileobj)[1] if extension == '.eam': self.form = 'eam' elif extension == '.alloy': self.form = 'alloy' elif extension == '.adp': self.form = 'adp' else: raise RuntimeError('u...
[ "def set_filename(self, name):\n\t\tself.cfg.set_str(ROOTKEY, 'filename', os.path.basename(name))", "def SetFileName(self, fileName):\n self.fileName = os.path.basename(fileName)\n self.fileName = os.path.splitext(self.fileName)[0]", "def makeLabelFromFileName(self, filename):\n self.fsLabe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes out the potential, in the format given by the form variable, to 'filename', using a data layout of nc columns
def write_potential(self, filename, nc=1, numformat='%.8e'): f = open(filename, 'wb') assert self.nr % nc == 0 assert self.nrho % nc == 0 for line in self.header: f.write(line) f.write('{0} '.format(self.Nelements).encode()) f.write(' '.join(self.elements)...
[ "def write_cp2k_wfn(self,filename):\n words = (self.natom_read,\\\n self.nspin_read,\\\n self.nao_read,\\\n self.nset_max,\\\n self.nshell_max)\n self.writeline(words)\n self.writeline(self.nset_info)\n self.writeline(self.n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the energy. The energy is made up of the ionic or pair interaction and the embedding energy of each atom into the electron cloud generated by its neighbors
def calculate_energy(self, atoms): pair_energy = 0.0 embedding_energy = 0.0 mu_energy = 0.0 lam_energy = 0.0 trace_energy = 0.0 self.total_density = np.zeros(len(atoms)) if (self.form == 'adp'): self.mu = np.zeros([len(atoms), 3]) self.la...
[ "def compute_energy(self, embeddings,\n other_embeddings):\n energies = tf.reduce_sum(\n -tf.square(embeddings[:, None, :] - other_embeddings[None, :, :]),\n axis=-1)\n return energies", "def calc_electronic_energy(self, verbose=False):\n\n assert(self.scaling_reacti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper for extracting the derivative from a spline
def deriv(self, spline):
    def d_spline(aspline):
        return spline(aspline, 1)
    return d_spline
[ "def Spline_Derivative(self, x_values, y_values, n_points, derivative, x_start=np.log(1e-10), x_end=0):\n\t\tif derivative < 1:\n\t\t\traise ValueError(\"Derivative input in Spline_Derivative less than 1. Use Cubic_spline instead.\")\n\t\tTemp_interp = interpolate.splrep(x_values, y_values)\n\t\tx_new = np.linspace...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the access_id of this APIToken.
def access_id(self, access_id): self._access_id = access_id
[ "def access_point_id(self, access_point_id: AccessPointId):\n\n self._access_point_id = access_point_id", "def do_access_key(self, access_key):\n self.amazon_access_key_id = access_key", "def set_access_token(access_token):\n global _access_token\n _access_token = access_token", "def set_acces...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the admin_name of this APIToken.
def admin_name(self, admin_name): self._admin_name = admin_name
[ "def requesting_admin_name(self, requesting_admin_name):\n if self.local_vars_configuration.client_side_validation and requesting_admin_name is None: # noqa: E501\n raise ValueError(\"Invalid value for `requesting_admin_name`, must not be `None`\") # noqa: E501\n\n self._requesting_admin_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the note of this APIToken.
def note(self, note): self._note = note
[ "def set_note(self, product, note, options=[]):\n note = str(note)\n item_index = self.__index__(product, options)\n if item_index != -1:\n self._items_list[item_index].note = note\n self.update_session()", "def set_note(self, **kwargs):\n return self.client.execu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the last_used_on of this APIToken.
def last_used_on(self, last_used_on): self._last_used_on = last_used_on
[ "def used_time(self, used_time: float):\n\n self._used_time = used_time", "def use(self):\n\t\tself.last_used = time()", "def last_user_at(self, value):\n self._last_user_at = value", "def set_used(self, in_use):\n\n if self.__used > 0 and in_use:\n logwarn(\"[\" + Actuator.__s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the access_key of this APIToken.
def access_key(self, access_key): self._access_key = access_key
[ "def do_access_key(self, access_key):\n self.amazon_access_key_id = access_key", "def set_access_token(access_token):\n global _access_token\n _access_token = access_token", "def set_access_token(self, access_token: str):\n self.headers = {\n 'Authorization': f'{access_token}'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the roles of this APIToken.
def roles(self, roles): self._roles = roles
[ "def setRoles(self, roles):\n pass", "def reset_roles(self, new_roles):\n self.roles = new_roles", "def auto_assign_roles(self):\n self._put(\"service/autoAssignRoles\", None, api_version=6)", "def apply_roles(self):\n minion_sets = []\n role_sets = []\n for instance in s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the admin_id of this APIToken.
def admin_id(self, admin_id): self._admin_id = admin_id
[ "def admin_name(self, admin_name):\n\n self._admin_name = admin_name", "def set_user_admin(self, is_admin: bool):\r\n self.is_admin = is_admin", "def configure_admin_unit(self):\n admin_unit_id = self.config.get('admin_unit_id')\n\n if admin_unit_id:\n registry = getUtilit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the probability a candidate wins in tie breaking. This function does not throw an exception. If a candidate is NOT in the list, it simply returns 0.
def winning_probability(potential_winners: list, candidate: Candidate): raise NotImplementedError
[ "def _calc_matching_prob(self):\n if not self.professional:\n return 1", "def get_potential(self, move):\n\t\tpossible_comp_wins = 0\n\t\tpossible_user_wins = 0\n\t\tfor combo in self.combo_queue:\n\t\t\tif move in combo.indices:\n\t\t\t\tif combo.user_count + 1 == self.size - 1 and combo.comp_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the max iterations of geometry optimization.
def set_geom_max_iterations(self, iterations):
    if not isinstance(iterations, int):
        raise Exception("max iterations must be an integer")
    self.keywords["CYCLES"] = iterations
[ "def set_max_iterations(self, max_iterations):\n self.max_iterations = max_iterations", "def setMaxIterations(self, maxiter):\n self.setOption(maxiter=maxiter)", "def change_maxiteration(self,new_iteration):\n\tself.maxiteration = new_iteration", "def set_edge_max(self):\n self._fiber_sho...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a MopTask object from a string
def from_string(cls, contents): lines = contents.split('\n') keywords = cls._parse_keywords(lines[0:1]) title = lines[1: 3] mol = cls._parse_molecule(lines[3:]) d = {"keywords": keywords, "title": title, "molecule": mol.as_dict(), "@module": cls.__module__, "@class":...
[ "def parse(cls, string: str):\n task = string.split(\"::\")\n if len(task) == 1:\n dataset = task[0]\n split = None\n elif len(task) == 2:\n dataset = task[0]\n split = task[1]\n else:\n raise ValueError(\"Received unexpected dataset...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to parse coordinates of Molecule. Copied from GaussianInput class.
def _parse_molecule(cls, contents): paras = {} var_pattern = re.compile("^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$") for l in contents: m = var_pattern.match(l.strip()) if m: paras[m.group(1)] = float(m.group(2)) species = [] coords = [] ...
[ "def _parse_coordinates(self, vars: Dict[str, VariableDefinition]) -> Tuple[Dict[str, VariableDefinition], Dict[str, VariableDefinition]]:\n coords = {name: var for name, var in vars.items() if var.is_coordinate()}\n vars = {name: var for name, var in vars.items() if not var.is_coordinate()}\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The species specification can take many forms. E.g., simple integers representing atomic numbers ("8"), actual species string ("C") or a labelled species ("C1"). Sometimes, the species string is also not properly capitalized, e.g, ("c1"). This method should take care of these known formats.
def parse_species(sp_str):
    try:
        return int(sp_str)
    except ValueError:
        sp = re.sub(r"\d", "", sp_str)
        return sp.capitalize()
[ "def _parse_pecan_species(spec_file):\n with open(spec_file, 'r') as f:\n for line in f:\n try:\n i = line.index('#')\n except ValueError:\n line2 = line.strip()\n else:\n line2 = line[:i].strip()\n\n if len(line2) > ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Anchor cell size in SVG units.
def getSVGSize(self): return self.getAttribute('svg_size')
[ "def set_cell_fontsize(item):\r\n text = item.get_text() # Text(0, 0, 'x') x - cell value\r\n text = str(text) # 'Text(0, 0, 'x')'\r\n text = text[12:-2] # x (str)\r\n lenght = len(text)\r\n item.set_fontsize(20-lenght)", "def format_cell_dimensions(self):\n for row in r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get minimum pixel size limit.
def getMinSize(self): return self.getAttribute('min_size')
[ "def minsize(self):\n return self._minsize", "def min_scale(self) -> float:\n return self.__min_scale", "def getMinSize(self):\n minW = minH = 0 # Let's see if we need bigger than this.\n for e in self.elements:\n eMinW, eMinH = e.getMinSize()\n minW = max(minW,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all odd-indexed rows from the given dataframe `self.data`
def odd_rows(self): return self.data.iloc[1::2]
[ "def even_rows(self):\n return self.data.iloc[:-2:2]", "def rows(df: pd.DataFrame):\n return len(df.index)", "def cur_group_rows(_data: DataFrame) -> List[int]:\n index = _data.attrs.get(\"_group_index\", None)\n if index is None:\n return list(range(_data.shape[0]))\n return _data.att...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all even-indexed rows from the given dataframe `self.data`
def even_rows(self): return self.data.iloc[:-2:2]
[ "def rows(df: pd.DataFrame):\n return len(df.index)", "def cur_group_rows(_data: DataFrame) -> List[int]:\n index = _data.attrs.get(\"_group_index\", None)\n if index is None:\n return list(range(_data.shape[0]))\n return _data.attrs[\"_group_data\"].loc[index, \"_rows\"]", "def rows(df):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return only the columns sliced from `self.data` based on the given `columns` parameter
def slice(self, columns): pass
[ "def filter_time_series_by_columns(self, columns, data_frame):\n return data_frame[columns]", "def _filter_by_columns(self, cells, columns, invert=False):\n columns_for_cells = self._columns_for_cells(cells)\n return cells[np.in1d(columns_for_cells, columns, invert=invert)]", "def get_colum...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encodes a column using numerical values and returns the dictionary for mapping.
def encode_column(self, column):
[ "def encode_values(data, column, values, **kwargs):\r\n if kwargs['encode_inputs']:\r\n data_column = data.loc[:, column]\r\n contained_values = np.array([v for v in values if v in data_column])\r\n\r\n # First determine and create total number of additional columns\r\n additional_col...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a mypy program. A single call to build performs parsing, semantic analysis and optionally type checking and other build passes for the program and all imported modules, recursively. Return BuildResult if successful; otherwise raise CompileError.
def build(program_path: str, target: int, module: str = None, program_text: Union[str, bytes] = None, alt_lib_path: str = None, bin_dir: str = None, output_dir: str = None, pyversion: int = 3, custom_typing_module: str = None, htm...
[ "def build_python():\n\n run('python setup.py build')", "def build(configure, output):\n try:\n run(configure, output)\n run(['make', '-j8', 'clean'], output)\n run(['make', '-j8'], output)\n run(['make', '-j8', 'check'], output)\n except CalledProcessError as err:\n pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a ready state (one that has all its dependencies met).
def next_available_state(self) -> 'State': i = len(self.states) - 1 while i >= 0: if self.states[i].is_ready(): num_incomplete = self.states[i].num_incomplete_deps() if num_incomplete == 0: # This is perfect; no need to look for the best ma...
[ "def get_ready_clause():\n return (\n models.Q(uploaded_on__isnull=False, live_state__isnull=True)\n | models.Q(live_state__isnull=False)\n | models.Q(\n starting_at__isnull=False,\n live_state=IDLE,\n )\n )", "def ready(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Have we seen a module yet?
def has_module(self, name: str) -> bool: return name in self.module_files
[ "def in_builtin_module(self):\r\n return not (self.module_path is None or\r\n self.module_path.endswith('.py'))", "def is_module(self, name):\n if self.get_module_info(name):\n return True\n return False", "def is_module_patched(modname):\r\n return modname ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the state of a source file. In particular, return UNSEEN_STATE if the file has no associated state. This function does not consider any dependencies.
def file_state(self, path: str) -> int:
    for s in self.states:
        if s.path == path:
            return s.state()
    return UNSEEN_STATE
[ "def module_state(self, name: str) -> int:\n if not self.has_module(name):\n return UNSEEN_STATE\n state = final_state\n fs = self.file_state(self.module_files[name])\n if earlier_state(fs, state):\n state = fs\n return state", "def source_state(self):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the state of a module. In particular, return UNSEEN_STATE if the module has no associated state. This also considers module dependencies.
def module_state(self, name: str) -> int:
    if not self.has_module(name):
        return UNSEEN_STATE
    state = final_state
    fs = self.file_state(self.module_files[name])
    if earlier_state(fs, state):
        state = fs
    return state
[ "async def get_state(self, **kwargs: Any) -> ModuleState:\n return self._state", "def file_state(self, path: str) -> int:\n for s in self.states:\n if s.path == path:\n return s.state()\n return UNSEEN_STATE", "def module_state(module):\n icon, state, text = NOT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all reachable import statements in a file. Return list of tuples (module id, import line number) for all modules imported in file.
def all_imported_modules_in_file(self, file: MypyFile) -> List[Tuple[str, int]]: def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str: """Function to correct for relative imports.""" file_id = file.fullname() rel = imp.relative ...
[ "def find_source_imports(filename):\n filename = Path(filename).absolute()\n with open(filename, \"r\") as f:\n code = f.read()\n # e.g. if script folder is scripts/ we'll need to relativize it\n package_prefix = \".\".join(Path(filename.parent).relative_to(os.getcwd()).parts)\n logging.debug(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform the code generation passes for type checked files.
def final_passes(self, files: List[MypyFile], types: Dict[Node, Type]) -> None:
    if self.target in [SEMANTIC_ANALYSIS, TYPE_CHECK]:
        pass  # Nothing to do.
    else:
        raise RuntimeError('Unsupported target %d' % self.target)
[ "def process(self) -> None:\n if self.manager.target >= TYPE_CHECK:\n self.type_checker().visit_file(self.tree, self.tree.path)\n if 'dump-infer-stats' in self.manager.flags:\n stats.dump_type_stats(self.tree, self.tree.path, inferred=True,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove current working directory prefix from p, if present. If the result would be empty, return '.' instead.
def remove_cwd_prefix_from_path(p: str) -> str: cur = os.getcwd() # Add separator to the end of the path, unless one is already present. if basename(cur) != '': cur += os.sep # Remove current directory prefix from the path, if present. if p.startswith(cur): p = p[len(cur):] # Avo...
[ "def clean_path(self, pth):\n pth = os.path.normpath(os.path.join(self._cwd, pth))\n return pth", "def removeCurrentDirectory(directory):\n return str(directory)[len(str(os.getcwd()))+1:]", "def remove_prefix(self, subdir, path):\n if path.startswith(subdir):\n path = path[len...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does path refer to a stubs file? Currently check if there is a 'stubs' directory component somewhere in the path.
def is_stub(path: str) -> bool:
    # TODO more precise check
    dirname, basename = os.path.split(path)
    if basename == '':
        return False
    else:
        stubnames = ['stubs', 'stubs-auto']
        return (basename in stubnames) or is_stub(dirname)
[ "def is_stub(file):\n\treturn os.stat(file).st_size == 0", "def is_banned(path):\n\n banned_paths = [\n 'scripts/tests/copyright/in',\n 'scripts/tests/copyright/out',\n 'third_party'\n ]\n\n banned_files = [\n 'scripts/lint/set_copyright.sh'\n ]\n\n path_banned = False\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of dependencies that are ready but incomplete.
def num_incomplete_deps(self) -> int: return 0 # Does not matter in this state
[ "def num_incomplete_deps(self) -> int:\n incomplete = 0\n for module in self.dependencies:\n state = self.manager.module_state(module)\n if (not earlier_state(self.state(), state) and\n not self.manager.is_dep(module, self.id)):\n incomplete += 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Semantically analyze file and advance to the next state.
def process(self) -> None:
    self.semantic_analyzer().visit_file(self.tree, self.tree.path)
    self.switch_state(PartiallySemanticallyAnalyzedFile(self.info(), self.tree))
[ "def _parse(self):\n logger.debug('Parsing file: %s', self.filename)\n self._context = []\n self._last_popped = None\n self.statement_pre_read = None\n self.sw = None\n while self.can_read():\n token = self.next_token()\n if token is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of dependencies that are incomplete. Here complete means that their state is later than this module. Cyclic dependencies are omitted to break cycles forcibly (and somewhat arbitrarily).
def num_incomplete_deps(self) -> int:
    incomplete = 0
    for module in self.dependencies:
        state = self.manager.module_state(module)
        if (not earlier_state(self.state(), state) and
                not self.manager.is_dep(module, self.id)):
            incomplete += 1
    return incomplete
[ "def num_incomplete_deps(self) -> int:\n return 0 # Does not matter in this state", "def count_incomplete(self):\n known_next = set(tup[1] for tup in self.known_nsec3)\n incomplete = known_next - self.known_nsec3_keys\n return len(incomplete)", "def count (self):\n total = 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Type check file and advance to the next state.
def process(self) -> None: if self.manager.target >= TYPE_CHECK: self.type_checker().visit_file(self.tree, self.tree.path) if 'dump-infer-stats' in self.manager.flags: stats.dump_type_stats(self.tree, self.tree.path, inferred=True, ty...
[ "def process_based_on_type(self, file_path):\n\t\t# Is this a file?\n\t\tif os.path.isfile(file_path):\n\t\t\tself.process_file(file_path)\n\t\t# Or is it a directory?\n\t\telif os.path.isdir(file_path):\n\t\t\tself.process_directory(file_path)", "def process(self) -> None:\n self.semantic_analyzer().visit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and read the source file of a module. Return a pair (path, file contents). Return (None, None) if the module could not be found or read.
def read_module_source_from_file(id: str, lib_path: List[str]) -> Tuple[str, str]: path = find_module(id, lib_path) if path is not None: text = '' try: f = open(path) try: text = f.read() finally: ...
[ "def getsourcefile(object):\r\n filename = getfile(object)\r\n if string.lower(filename[-4:]) in ('.pyc', '.pyo'):\r\n filename = filename[:-4] + '.py'\r\n elif filename.endswith('$py.class'):\r\n filename = filename[:-9] + '.py'\r\n for suffix, mode, kind in imp.get_suffixes():\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that all packages containing id have an __init__ file.
def verify_module(id: str, path: str) -> bool: if path.endswith(('__init__.py', '__init__.pyi')): path = dirname(path) for i in range(id.count('.')): path = dirname(path) if not any(os.path.isfile(os.path.join(path, '__init__{}'.format(extension))) for extension in PYT...
[ "def testInitPresence(self):\n for fileName in self.files:\n if os.path.isdir(fileName):\n self.assertTrue(\n os.path.isfile(\n os.path.join(fileName, '__init__.py')\n )\n )", "def ispackage(m):\n try:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate lookup values and return corresponding CIKs.
def get_ciks(self): ciks = dict() for lookup in self._lookups: try: result = self._get_cik(lookup) self._validate_cik(result) # raises error if not valid CIK ciks[lookup] = result except CIKError: pass # If multipl...
[ "def _get_cik(self, lookup):\n self._validate_lookup(lookup)\n try: # try to lookup by CIK\n self._params['CIK'] = lookup\n soup = self._client.get_soup(self.path, self.params)\n except EDGARQueryError: # fallback to lookup by company name\n del self._params['...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get CIK for lookup value.
def _get_cik(self, lookup): self._validate_lookup(lookup) try: # try to lookup by CIK self._params['CIK'] = lookup soup = self._client.get_soup(self.path, self.params) except EDGARQueryError: # fallback to lookup by company name del self._params['CIK'] # de...
[ "def name_to_cik(self, name):\n return self.name_cik[name]", "def _lookup(self, c):\n if isinstance(c, int):\n return c, self.calculations.get(c, None)\n if isinstance(c, str):\n try:\n c = int(c)\n return c, self.calculations.get(c, None)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all CIK possibilities if multiple are listed.
def _get_cik_possibilities(soup): try: # Exclude table header table_rows = soup.find('table', {'summary': 'Results'}).find_all('tr')[1:] # Company names are in second column of table return [''.join(row.find_all('td')[1].find_all(text=True)) for row in table_rows]...
[ "def get_possibilities(self, mc):\n if self.chainMode == ChainMode.CHARS:\n return [s for s in mc for t in xrange(mc[s])]\n elif self.chainMode == ChainMode.WORDS:\n return [[s] for s in mc for t in xrange(mc[s])]", "def iterate_kmer(k):\n try:\n bases = ['A','C','T',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if CIK is a 10-digit string.
def _validate_cik(cik): if not (isinstance(cik, str) and len(cik) == 10 and cik.isdigit()): raise CIKError(cik)
[ "def check_digit(self) -> str:\n if self._gtin14:\n ret = self._gtin14[13]\n else:\n ret = self._sscc18[17]\n return ret", "def _isDigit(self, c):\n return c in self.BASE_DIGITS[self.base]", "def is_digit(c):\n return DigitClass.test(c)", "def my_isdigit(s):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that lookup is a string.
def _validate_lookup(lookup): if not isinstance(lookup, str): raise TypeError("Lookup value must be string. Given type {0}.".format(type(lookup)))
[ "def test_string_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type(u\"Hello\", source), \"unicode\")\n self.assertEqual(self.setting.detect_type(\"Hello\", source), \"unicode\")", "def supports(self, lookup_str: str) -> bool:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For some reason, even if you've already loaded the sequence, this might not work until you click "Extensions/Molecule/Molecule sequence/Okay". You don't have to change anything; just do the clicks.
def sequence(): return project().molecule_list()[0].sequence.one_letter_codes
[ "def set_sequence(self, sequence):\n\t\tself.sequence = sequence\n\t\tsystem = self.system\n\t\tsystem.set_sequence(sequence, mode = 'stand alone')", "def disable_seq_setup(self):\n # re-enable widgets, may have been disabled during a prior sequence selection\n self.copy_gizmos_cbox.setEnabled(True)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
I don't actually want to assign peaktypes; I want to grab each resonance and assign it.
def assign_peaktype(atomtypes): pks = _selected_peaks() for pk in pks: if len(pk.resonances()) != len(atomtypes): raise ValueError('peaktype does not match peak dimensionality') if pk.note in ['artifact', 'noise']: raise ValueError('cannot assign peaktype of noise or arti...
[ "def extractPeaks(sparky_file):\n\n f = open(sparky_file,'r')\n sparky = f.readlines()\n f.close()\n\n # Create peak_lines, a list of tuples that describe the line-numbers of \n # each peak.\n peak_lines = [i for i, l in enumerate(sparky) if l[0:9] == \"type peak\"]\n end_ornament = [i for i, l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each selected peak, create a new group.
def create_group_for_peak(): for pk in _selected_peaks(): set_new_group([pk])
[ "def dfsortpeakvals(mydf, cd):\n\n filter_col_loc=[col for col in mydf if str(col).startswith(cd + '_center')]\n filter_col_height = [col for col in mydf if str(col).startswith(cd + '_height')]\n filter_col_area = [col for col in mydf if str(col).startswith(cd + '_area')]\n filter_col_sigma = [col for c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns option value from Vu and Vd
def option_value(self, Vu, Vd): return math.exp(-self.r) * (self.p_value() * Vu + (1 - self.p_value()) * Vd)
[ "def option1_vuitton(self):\n return self._option1_vuitton", "def get_voltage(self):\n result=self.asker('OD')\n if result[0] not in ('N', 'E'):\n header=0\n else:\n header=1\n if result[0]=='E':\n overload=True\n else:\n overload=False\n mode='V'\n if heade...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns binary tree for stock prices
def stock_prices(self): tree = [[self.S]] for t in range(self.T): for state in tree: new_state = set() for p in range(len(state)): if len(new_state) == 0: Su = state[p] * self.u Sd = s...
[ "def __build_binary_tree(self):\r\n\r\n # Create starting leaves\r\n for i in range(256):\r\n self.huffman_tree.append({\r\n 'frq': self.huffman_freqs[i],\r\n 'asc': i,\r\n })\r\n\r\n # Pair leaves and branches based on frequency until there i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns reversed binary tree of call option prices
def call_values(self): end_values = map(lambda s: round(max(s - self.X, 0), self.dp), reversed(self.stock_prices()[-1])) reverse_tree = [[*end_values]] for t in range(self.T): for state in reverse_tree: previous_state = [] ...
[ "def put_values(self):\r\n end_values = map(lambda s: round(max(self.X - s, 0), self.dp), \r\n reversed(self.stock_prices()[-1]))\r\n reverse_tree = [[*end_values]]\r\n for t in range(self.T):\r\n for state in reverse_tree:\r\n previous_state = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns reversed binary tree of put option prices
def put_values(self): end_values = map(lambda s: round(max(self.X - s, 0), self.dp), reversed(self.stock_prices()[-1])) reverse_tree = [[*end_values]] for t in range(self.T): for state in reverse_tree: previous_state = [] ...
[ "def stock_prices(self):\r\n tree = [[self.S]]\r\n for t in range(self.T):\r\n for state in tree:\r\n new_state = set()\r\n for p in range(len(state)):\r\n if len(new_state) == 0:\r\n Su = state[p] * self.u\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }