query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: list, lengths 19 to 20
metadata: dict
Return a list of 2-tuples (idx, run); filter out runs that are not incrementable.
def compress_runs(runs, incrementable): return [(i, r) for i, r in enumerate(runs) if r.analysis_type in incrementable]
[ "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))", "def xwe_indexes(self):\n ret = [[i] for i in range(len(self.word_list))]\n for mweo in self.mweoccurs:\n if mweo.indexes:\n first = min(mweo.indexes)\n for i in mweo.indexes:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implemented label methods should place labels within a LETTER_HEIGHT x (len(label) * LETTER_WIDTH) region centered at (label_x, label_y).
def _draw_label(label, label_x, label_y): pass
[ "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the ElasticSearch index or indices to query. By default, we obtain the index from the foreign table options. However, this method can be overridden to derive the index from the query quals. For example, the `timestamp` qual could be used to select one or more time-based indices.
def get_index(self, _quals): return self._options['index']
[ "def es_index(cls):\n return cls.__tablename__", "def get_index(\n self,\n ) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deseri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a CoreBluetooth UUID to a Python string. If ``_uuid`` is a 16-bit UUID, it is assumed to be a Bluetooth GATT UUID (``0000xxxx-0000-1000-8000-00805f9b34fb``). Args
def cb_uuid_to_str(_uuid: CBUUID) -> str:
    _uuid = _uuid.UUIDString()
    if len(_uuid) == 4:
        return "0000{0}-0000-1000-8000-00805f9b34fb".format(_uuid.lower())
    # TODO: Evaluate if this is a necessary method...
    # elif _is_uuid_16bit_compatible(_uuid):
    #     return _uuid[4:8].lower()
    else:
        ...
[ "def uuid_to_string(uuid):\n if not isinstance(uuid, str):\n raise TypeError(\"Expected a UUID string.\")\n\n if len(uuid) != 36:\n raise ValueError(\"Expected the UUID string to be 36 characters long.\")\n\n uuid_text = ALL_128BIT_UUIDS.get(uuid, None)\n if uuid_text is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instruct the light to turn on. You can skip the brightness part if your light does not support brightness control.
def turn_on(self, **kwargs):
    self._brightness = 100
    self._state = 'on'
    #self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
    #self._light.turn_on()
    _LOGGER.info("turn_on() is called")
[ "def turn_on(self, **kwargs: Any) -> None:\n self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n self._light.turn_on()", "def set_light_on(self):\r\n self._light = \"ON\"", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def set_light_on(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predicts whether the faces belong to a trained class.
def face_prediction(self, frame, faces): predictions = FaceModel.model.predict_proba(FaceModel.emb_array) best_class_indices = np.argmax(predictions, axis=1) best_class_probabilities = predictions[ np.arange(len(best_class_indices)), ...
[ "def classifier(self):\r\n # load dataset\r\n data = load(self.faces_embeddings)\r\n trainX, trainy, testX, testy = data['arr_0'], data['arr_1'], data['arr_2'], data['arr_3']\r\n print('Dataset: train=%d, test=%d' % (trainX.shape[0], testX.shape[0]))\r\n # normalize input vectors\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs all necessary work to do face classification. Returns
def classification(self): if self.video_in != None: ret, frame = self.video_in.get_a_frame() elif self.camera_in != None: ret, frame = self.camera_in.get_a_frame() if ret == True: # detect face faces = FaceModel.detect_face(self, frame) ...
[ "def classify_face(image):\n # print(original)\n print(\"\\nPROCESSING UNKNOWN FACES\")\n\n global next_id\n # image = face_recognition.load_image_file(image, mode=\"L\")\n # image = np.array(image)\n locations = face_recognition.face_locations(image, model=MODEL)\n encodings = face_recognition...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an expression equivalent to a lookup table
def build_lookup(mapping, var='ptype', default='ptype'):
    if len(mapping) > 0:
        return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))
    else:
        return str(default)
[ "def build_lookup(mapping, var='ptype', default=0.):\n # force mapping to be a list if it wasn't already\n mapping=list(mapping)\n if len(mapping) > 0:\n return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an expression equivalent to a lookup table
def build_lookup(mapping, var='ptype', default=0.):
    # force mapping to be a list if it wasn't already
    mapping = list(mapping)
    if len(mapping) > 0:
        return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))
    else:
        return str(default)
[ "def build_lookup(mapping, var='ptype', default='ptype'):\n if len(mapping) > 0:\n return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))\n else:\n return str(default)", "def __getitem__(self, key: str) -> ir.TableExpr:\n return sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Walks the source paths and places them in appropriate environment variables.
def parse_paths(): sources = get_source_paths() results = collections.defaultdict(list) for root_dir in sources: for script_type, dirs in walkdirs(root_dir).iteritems(): for d in dirs: logger.debug(d) # Add paths to environments ...
[ "def source_paths():\n return ConfigurationVariables()['sourcepath']", "def _set_environment_vars(self):\n os.environ[\"PATH\"] = os.path.join(self.source_folder, \"depot_tools\") + os.pathsep + os.environ[\"PATH\"]\n os.environ[\"DEPOT_TOOLS_PATH\"] = os.path.join(self.source_folder, \"depot_too...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns defaultdict with script type / paths mapping, excluding given patterns and python packages.
def walkdirs(root): scriptype_paths = collections.defaultdict(set) for root, subdirs, files in os.walk(root): # Filter subdirs tmpdir = [] for i in subdirs: if i.startswith(EXCLUDE_PATTERNS): continue if '__init__.py' in os.listdir(os.pat...
[ "def filter_by_patterns(\n packages: Mapping[str, im.Distribution], patterns: Iterable[str]\n ) -> Mapping[str, im.Distribution]:\n if not patterns:\n return packages\n return {k: v for k, v in packages.items() if any(fnmatch(k, p) for p in patterns)}", "def _get_extension_to_ty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return valid paths from __file__ dir, PYENV and MELENV.
def get_source_paths(): script_paths = set() try: script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep))) script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep))) except AttributeError: logger.debug('No custom environ variables set.') ...
[ "def get_environ_path(self):\n paths = []\n if 'PYTHONCOMPILED' in os.environ:\n path_string = os.environ['PYTHONCOMPILED']\n paths = path_string.split(os.path.pathsep)\n return paths", "def get_environment_paths(basedir=None):\n basedir = (\n get_default_secre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the best camera zoom given the atlas resolution
def zoom(self):
    res = np.max(self.metadata["resolution"])
    if self.atlas_name == "allen_human_500um":
        logger.debug(
            "ATLAS: setting zoom manually for human atlas, atlas needs fixing"
        )
        return 350
    else:
        return 40 / res
[ "def get_allowed_zoom(lon_min, lon_max, lat_min, lat_max, z=18):\n\n x0, y0 = deg2num(lat_min, lon_min, z)\n x1, y1 = deg2num(lat_max, lon_max, z)\n\n sx = abs(x1 - x0) + 1\n sy = abs(y1 - y0) + 1\n\n if sx * sy >= MAX_TILES:\n z = get_allowed_zoom(lon_min, lon_max, lat_min, lat_max, z - 1)\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the rgb color of a region in the atlas
def _get_region_color(self, region): return [ x / 255 for x in self._get_from_structure(region, "rgb_triplet") ]
[ "def get_color_in_region(self, start, end):\n # Input format: (start_x, start_y), (end_x, end_y)\n start_x, start_y = start\n end_x, end_y = end\n\n # x and y are flipped\n crop_img = self.img[start_x:(end_x + 1), start_y:(end_y + 1)]\n channels = cv2.mean(crop_img)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a plane going through a point at pos, oriented orthogonally to the vector norm and of width and height sx, sy.
def get_plane( self, pos=None, norm=None, plane=None, sx=None, sy=None, color="lightgray", alpha=0.25, **kwargs, ): axes_pairs = dict(sagittal=(0, 1), horizontal=(2, 0), frontal=(2, 1)) if pos is None: pos = self.ro...
[ "def plane_from_normal(ctr, normal):\n\tnormal_vector = Vector(normal)\n\tquat = normal_vector.to_track_quat('Z', 'Y') # for normal to rotation only quaternion is available. Z is direction, Y is up\n\tquat_obj = mathutils.Quaternion(quat)\n\teul = quat_obj.to_euler()\n\tbpy.ops.mesh.primitive_plane_add(location=ctr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compute z_n at previous time, i.e. z_n(t), z_n(t-1)
def compute_z_prev(n, Z_opt, device): nzx, nzy = Z_opt.shape[2], Z_opt.shape[3] # no. of channel for noise input nc_z = 3 if n == 0: # z_rand is gaussian noise z_rand = functions.generate_noise([1, nzx, nzy], device= device) z_rand = z_rand.expand(1, 3, Z_opt.shape[2], Z...
[ "def control_z_forward(self, t, z):\n\n if self._use_null:\n null = self.null_z(t)\n\n return z + null, null\n else:\n null = [0.0, 0.0, 0.0, 0.0]\n\n return z, null", "def calcZ(self, t):\n tt = t - self.__startTime\n return self.__startPos[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Usage: Compute all the free space on the boundary of cells in the diagram for polygonal chains P and Q and the given eps. LF[(i,j)] is the free space of segment [Pi, Pi+1] from point Qj. BF[(i,j)] is the free space of segment [Qj, Qj+1] from point Pi.
def LF_BF(P, Q, p, q, eps, mdist, P_dist, Q_dist): LF = {} for j in range(q): for i in range(p - 1): LF.update({(i, j): free_line(Q[j], eps, P[i:i + 2], mdist[i, j], mdist[i + 1, j], P_dist[i])}) BF = {} for j in range(q - 1): for i in range(p): BF.update({(i, j):...
[ "def LR_BR(LF, BF, p, q):\n if not (LF[(0, 0)][0] <= 0 and BF[(0, 0)][0] <= 0 and LF[(p - 2, q - 1)][1] >= 1 and BF[(p - 1, q - 2)][1] >= 1):\n rep = False\n BR = {}\n LR = {}\n else:\n LR = {(0, 0): True}\n BR = {(0, 0): True}\n for i in range(1, p - 1):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Usage: Compute all the free space that is reachable from the origin (P[0,0], Q[0,0]) on the boundary of cells in the diagram for polygonal chains P and Q and the given free spaces LR and BR. LR[(i,j)] is the free space, reachable from the origin, of segment [Pi, Pi+1] from point Qj. BR[(i,j)] is the free space, reachable f...
def LR_BR(LF, BF, p, q): if not (LF[(0, 0)][0] <= 0 and BF[(0, 0)][0] <= 0 and LF[(p - 2, q - 1)][1] >= 1 and BF[(p - 1, q - 2)][1] >= 1): rep = False BR = {} LR = {} else: LR = {(0, 0): True} BR = {(0, 0): True} for i in range(1, p - 1): if LF[(i, 0)]...
[ "def CalcAvailSpace(UPConfig,TimeStep,lu,row,cumAlloc):\n pclid = row[UPConfig['BaseGeom_id']]\n \n# if pclid == 30:\n# print(\"Stop here\")\n \n # get cumAlloc row for polygon\n try:\n caRow = cumAlloc.loc[cumAlloc[UPConfig['BaseGeom_id']]== pclid] #cumAlloc.loc[pclid]\n except...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Usage: Compute all the critical values between trajectories P and Q.
def compute_critical_values(P, Q, p, q, mdist, P_dist, Q_dist): origin = eucl_dist(P[0], Q[0]) end = eucl_dist(P[-1], Q[-1]) end_point = max(origin, end) cc = set([end_point]) for i in range(p - 1): for j in range(q - 1): Lij = point_to_seg(Q[j], P[i], P[i + 1], mdist[i, j], mdis...
[ "def _critical_energy(self):\n if self.is_stable or self.ctau <= 0.:\n self.E_crit = np.inf\n else:\n self.E_crit = self.mass * 6.4e5 / self.ctau", "def frechet(P, Q):\n p = len(P)\n q = len(Q)\n\n mdist = eucl_dist_traj(P, Q)\n P_dist = [eucl_dist(P[ip], P[ip + 1])...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Usage: Compute the Fréchet distance between trajectories P and Q.
def frechet(P, Q): p = len(P) q = len(Q) mdist = eucl_dist_traj(P, Q) P_dist = [eucl_dist(P[ip], P[ip + 1]) for ip in range(p - 1)] Q_dist = [eucl_dist(Q[iq], Q[iq + 1]) for iq in range(q - 1)] cc = compute_critical_values(P, Q, p, q, mdist, P_dist, Q_dist) eps = cc[0] while (len(cc) !...
[ "def distance(self, p: np.ndarray, q: np.ndarray) -> float:\n\n def calculate(i: int, j: int) -> float:\n \"\"\"\n Calculates the distance between p[i] and q[i]\n :param i: Index into poly-line p\n :param j: Index into poly-line q\n :return: Distance val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append size with the specified number of entities
def appendsize(self, numents): pass
[ "def appendsize(self, numents):\n self._numents += numents", "def increment_size(self):\n\n self.size += 1", "def inc_size(self):\r\n self.__length += 1", "def extend(self, size: int) -> None:\n if not isinstance(size, int):\n raise TypeError(\"Size must be an integer\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append size with the specified number of entities
def appendsize(self, numents): self._numents += numents
[ "def appendsize(self, numents):\n pass", "def increment_size(self):\n\n self.size += 1", "def inc_size(self):\r\n self.__length += 1", "def extend(self, size: int) -> None:\n if not isinstance(size, int):\n raise TypeError(\"Size must be an integer\")\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Allocate memory for the vertex data channels. Allocation size is based on the information collected by client calls to appendsize().
def allocatememory(self):
    self._numvertstotal = self._numents * self._nvet
    self._cords = VertDataSingleChannel(GLDataType.FLOAT, 3, self._numvertstotal)
    self._colors = VertDataSingleChannel(GLDataType.UBYTE, 4, self._numvertstotal)
[ "def _allocate_buffer_memory(self):\n for channel in self._channels_dict.values():\n if channel.enabled:\n channel.allocate(self._num_captures, self._num_samples)", "def allocatememory(self):\n\n for key, value in self._dentsvertsdata.items():\n value.allocatemem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Free vertex data channels memory
def free(self):
    self._cords.free()
    self._colors.free()
[ "def ggml_free(ctx: ffi.CData) -> None:\n ...", "def free_intermediate_arrays(self):\n self._mgx = None\n self._mgy = None\n self._mgz = None\n self._vander = None\n self._bkg_cube = None\n self._bkg_cube_dirty = True", "def clean(self):\n # Delete vertices / ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Clone the instance of the VertDataCollectorCoord3fColor4ub class. Overrides the base class abstract method.
def clone(self):
    vdc = VertDataCollectorCoord3fColor4ub(self._enttype)
    return vdc
[ "def copy(self):\n return Plane3D(self.point.copy(), self.normal.copy())", "def copy(self):\n return vertex(self.x, self.y, self.z)", "def __init__(self, coordinates, colors):\n self.coordinates = coordinates.reshape(-1, 3)\n self.colors = colors.reshape(-1, 3)", "def __makeColorDa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate test image with random pixels and save as an image file.
def test_image(filename, x_size=350, y_size=350): # Create image and loop over all pixels im = Image.new("RGB", (x_size, y_size)) pixels = im.load() for i in range(x_size): for j in range(y_size): x = remap(i, 0, x_size, -1, 1) y = remap(j, 0, y_size, -1, 1) p...
[ "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates computational art and saves it as an image file. All args are optional. complexity: base complexity (depth of recursion) for image creation. num_frames: determines how many frames will be drawn.
def gen_art(complexity=7, num_frames=1, x_size=350, y_size=350): # Functions for red, green, and blue channels - where the magic happens! red_function = bld_func(complexity, complexity+2) green_function = bld_func(complexity, complexity+2) blue_function = bld_func(complexity, complexity+2) # Crea...
[ "def main(filename):\n # Generate frames.\n images = []\n for framenum in range(TOTAL_DOTS):\n image = draw_frame(framenum)\n images.append(image)\n\n # Write gif.\n images[0].save(filename, save_all=True, append_images=images[1:],\n duration=SECONDS / TOTAL_DOTS * 100...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates and returns Y position to draw the graph or the border lines on canvas. Correct calculation is based on given sensor.
def calculate_y_pos(value, sensor):
    if GraphModel.check_value(value, sensor):
        return ((32 - int(value)) * 12.5) + 50 if sensor == 't' else 450 - (int(value) / 10 * 40)
    return
[ "def canvasy( self, y_root, canvas ):\n return canvas.canvasy( y_root ) - canvas.winfo_rooty()", "def get_y(self):\n return self.y_coord", "def y_coord(self):\n\n return self.y0 + np.arange(self.ny) * self.dy", "def get_line_position(self):\r\n # get each line sensor value\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns mean value for values in mean_t or mean_l list based on sensor.
def calculate_mean(cls, sensor): try: if sensor == 't': return cls.calculate_y_pos(sum(cls.mean_t) / len(cls.mean_t), sensor) if sensor == 'l': return cls.calculate_y_pos(sum(cls.mean_l) / len(cls.mean_l), sensor) except ZeroDivisionError: ...
[ "def get_t_mean_value(self, value_list):\n if not len(value_list):\n return None\n else:\n return round(statistics.mean(value_list), 1)", "def _get_u_mean(self, nodelist: List[Tuple[int, int]]) -> Optional[float]:\n meanlist = [self.u_matrix[u_node] for u_node in nodelis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method is used internally to check if the current animation needs to be skipped or not. It also checks if the number of animations that were played correspond to the number of animations that need to be played, and raises an EndSceneEarlyException if they don't correspond.
def update_skipping_status(self): # there is always at least one section -> no out of bounds here if self.file_writer.sections[-1].skip_animations: self.skip_animations = True if ( config["from_animation_number"] and self.num_plays < config["from_animation_num...
[ "def continueLoop(self):\n return(len(self.stages) > len(self.processedStages))", "def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an image from the current frame. The first argument passed to image represents the mode RGB with the alpha channel A. The data we read is from the currently bound frame buffer. We pass in 'raw' as the name of the decoder; the 0 and 1 args are specifically used for the decoder and represent the stride and orientati...
def get_image(self) -> Image.Image: raw_buffer_data = self.get_raw_frame_buffer_object_data() image = Image.frombytes( "RGBA", self.get_pixel_shape(), raw_buffer_data, "raw", "RGBA", 0, -1, ) return image
[ "def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1", "def getImage(self, *args):\n return _yarp.IFrameGrabberImageRaw_getImage(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the commitment to sha256(serialization of public key P2). Return in hex to calling function.
def get_commitment(self): if not self.P2: raise PoDLEError("Cannot construct commitment, no P2 available") if not isinstance(self.P2, secp256k1.PublicKey): raise PoDLEError("Cannot construct commitment, P2 is not a pubkey") self.commitment = hashlib.sha256(self.P2.seriali...
[ "def hexsha(self):\r\n return b2a_hex(self[1])", "def binsha(self):\r\n return self[1]", "def sha256(self):\n return self._sha256", "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def oldhexsha(self): \r\n return self[0]", "def sha256_hexoutput(in_str):\r\n return sha256(in_str....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encapsulate all the data representing the proof in a dict for client functions. Data output in hex.
def reveal(self): if not all([self.u, self.P, self.P2, self.s, self.e]): raise PoDLEError("Cannot generate proof, data is missing") if not self.commitment: self.get_commitment() Phex, P2hex, shex, ehex, commit = [ safe_hexlify(x) for x in [self.P.serialize(), ...
[ "def json_data(self):\n self.check_proof()\n return {\n \"vars\": [{'name': v.name, 'T': str(v.T)} for v in self.vars],\n \"proof\": sum([printer.export_proof_item(self.thy, item, unicode=True, highlight=True)\n for item in self.prf.items], []),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For an object created without a private key, check that the opened commitment verifies for at least one NUMS point as defined by the range in index_range
def verify(self, commitment, index_range): if not all([self.P, self.P2, self.s, self.e]): raise PoDLE("Verify called without sufficient data") if not self.get_commitment() == commitment: return False for J in [getNUMS(i) for i in index_range]: sig_priv = secp2...
[ "def _check_validity(self):\n cnt = np.array([len(v) for v in self.t_signatures.values()])\n cnt_n = len(cnt) - self.min_bins\n idx = None\n if cnt_n < 0:\n self.valid = False\n else:\n y = [np.all(cnt[i:(i + self.min_bins)] >= self.min_neigh) for i in range(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the public key binary representation of secp256k1 G
def getG(compressed=True):
    priv = "\x00"*31 + "\x01"
    G = secp256k1.PrivateKey(priv, ctx=ctx).pubkey.serialize(compressed)
    return G
[ "def get_pubkey(self):\n assert(self.valid)\n ret = ECPubKey()\n p = SECP256K1.mul([(SECP256K1_G, self.secret)])\n ret.p = p\n ret.valid = True\n ret.compressed = self.compressed\n return ret", "def public_key(self):\n pkn = self._public_key_native[\"public_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Taking secp256k1's G as a seed, either in compressed or uncompressed form, append "index" as a byte, and append a second byte "counter"; try to create a new NUMS base point from the sha256 of that bytestring. Loop counter and alternate compressed/uncompressed until finding a valid curve point. The first such point is co...
def getNUMS(index=0): assert index in range(256) nums_point = None for G in [getG(True), getG(False)]: seed = G + chr(index) for counter in range(256): seed_c = seed + chr(counter) hashed_seed = hashlib.sha256(seed_c).digest() #Every x-coord on the curve ...
[ "def generate_base():\n yield 0\n yield s\n cc = 256 - s\n prev = s\n while True:\n prev = prev + s * cc\n yield prev\n cc *= cc", "def key_generation(K,tweaks,N,i):\n C = bytes(b'\\x1b\\xd1\\x1b\\xda\\xa9\\xfc\\x1a\\x22')\n tweaks.append(b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the algorithm produces the expected NUMS values; more a sanity check than anything since if the file is modified, all of it could be; this function is mostly for testing, but runs fast with precomputed context so can be run in user code too.
def verify_all_NUMS(write=False): nums_points = {} for i in range(256): nums_points[i] = safe_hexlify(getNUMS(i).serialize()) if write: with open("nums_basepoints.txt", "wb") as f: from pprint import pformat f.write(pformat(nums_points)) assert nums_points == prec...
[ "def verify():\n p = sum(map(lambda f: f.p, FileStore.files))\n mean_size = sum(map(lambda f: f.size, FileStore.files)) / len(FileStore.files)\n\n logger.debug(f\"Sum probabilties: {p}\")\n logger.debug(f\"Mean file size: {mean_size}\")", "def test_nans(self):\n xlooks = ylooks ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a secp256k1.PrivateKey priv and a secp256k1.PublicKey nums_pt, an alternate
def getP2(priv, nums_pt):
    priv_raw = priv.private_key
    return nums_pt.tweak_mul(priv_raw)
[ "def check_equal_rsa_pub_key(sk2_, sk_):\n pub_n = sk_.public_numbers()\n pub_n2 = sk2_.public_numbers()\n\n self.assertEqual(pub_n2.e, pub_n.e)\n self.assertEqual(pub_n2.n, pub_n.n)", "def gen_pair(dec_digits: int=2048, save_dir: str='.'):\n key = RSA.generate(bits=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To allow external functions to add PoDLE commitments that were calculated elsewhere;
def add_external_commitments(ecs): update_commitments(external_to_add=ecs)
[ "def update_commitments(commitment=None, external_to_remove=None,\n external_to_add=None):\n c = {}\n if os.path.isfile(PODLE_COMMIT_FILE):\n with open(PODLE_COMMIT_FILE, \"rb\") as f:\n try:\n c = json.loads(f.read())\n except ValueError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Optionally add the commitment `commitment` to the list of 'used', and optionally remove the available external commitment whose key value is the utxo in external_to_remove; persist updated entries to disk.
def update_commitments(commitment=None, external_to_remove=None, external_to_add=None): c = {} if os.path.isfile(PODLE_COMMIT_FILE): with open(PODLE_COMMIT_FILE, "rb") as f: try: c = json.loads(f.read()) except ValueError: pr...
[ "def add_external_commitments(ecs):\n update_commitments(external_to_add=ecs)", "def commit(self):\n self.logger.debug(\"Writing flow removal to database\")\n #*** Write to database collection:\n self.flow_rems.insert_one(self.dbdict())", "def add(self, transaction, markers):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of privkeys, try to generate a PoDLE which is not yet used more than tries times.
def generate_podle(priv_utxo_pairs, tries=1, allow_external=None): used_commitments, external_commitments = get_podle_commitments() for priv, utxo in priv_utxo_pairs: for i in range(tries): #Note that we will return the *lowest* index #which is still available. p = Po...
[ "def generate_passwords( n, pw_list ):\n tempPW = random.choice(pw_list)\n k = len(tempPW)\n L = [ pw for pw in pw_list if len(pw) == k ]\n\n ans = [ ]\n for t in range( n ):\n pw = make_password(L, tempPW)\n # print \"pw\", pw\n ans.append( pw )\n for pw in ans:\n print ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function counts how many words are in the given file_name
def count_all_words(file_name): return len(separate_words(file_name))
[ "def count_words(filename):", "def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))", "def count_words(filename):\n\ttry:\n\t\twith open(filename, encoding='utf-8') as f_obj:\n\t\t\tcontents = f_obj.read()\n\texcept FileNotFoundError:\n\t\tmsg = \"Sorry, the file \" + filename + \" ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function counts how many words are the same between the cuisine file and menu list.
def count_same_words(cuisine_file, menu):
    cuisine_list = separate_words(cuisine_file)
    same_word_count = 0
    for i in cuisine_list:
        for j in menu:
            if i == j:
                same_word_count += 1
    return same_word_count
[ "def count_words(filename):", "def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))", "def count_all_words(file_name):\n\n return len(separate_words(file_name))", "def word_count():\n\n total_lines = 0\n total_words = 0\n total_characters = 0\n\n for path in sys.arg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes a dictionary of cuisines and their scores per dining hall menu to a JSON file.
def to_JSON(meal, list_of_cuisines, list_of_menus): data = {} for cuisine in list_of_cuisines: cuisine_list = separate_words(cuisines[cuisine]) scores = {} for i in range(len(list_of_menus)): scores[menus[i]] = get_score(cuisines[cuisine], list_of_menus[i]) data[...
[ "def writeJson(genomic_hits, benchmark_hits, cache_dir):\n hits = {\"genomic\": {}, \"benchmark\": {}}\n print(hits)\n consensus = os.path.dirname(genomic_hits).split(\"/\")[-1]\n print(os.listdir(genomic_hits))\n for sc in os.listdir(genomic_hits):\n matrix = sc.split(\"_\")[1][:-3]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get shape key local coordinates.
def get_shapekeys_co(ob_name): obj = bpy.data.objects[ob_name]
[ "def getIndex(shapekey):\n return _shkey2Index[shapekey]", "def get_key_id(self):", "def loc_key(self):\r\n key = tuple(self.loc.coord)\r\n return (key)", "def grid_shape(self, key):\n return self.execute(b\"GRID.SHAPE\", key)", "def 取形(我):\n return sorted(我._shapes.keys())", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all users having a specific treatment. Return a list of unique ids.
def get_userids(cursor, having_treatment=None):
    cursor.execute('SELECT id FROM users WHERE treatment=?', (having_treatment,))
    return cursor.fetchall()
[ "def _get_user_ids(model):\n return model.objects.values_list(\"user\", flat=True).distinct(\"user\")", "async def get_entrant_ids() -> list:\n async with DBConnect(commit=False) as cursor:\n cursor.execute(\n \"\"\"\n SELECT `user_id`\n FROM {entrants}\n \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inject a new user if it is not already existent.
def inject_user(dbConn, uid, username): try: with dbConn: pars = (uid, username) print "pars: ", pars dbConn.execute( """INSERT OR IGNORE INTO users(uid, username) VALUES(?, ?)""", pars) except sqlite3.IntegrityError as e: log.error("Error in ...
[ "def input_and_create_user(self):\n print(\"Please input username!\")\n new_username = input()\n new_user = user.User(new_username)\n self.users.append(new_user)", "def add_user_to_g():\n if CURR_USER in session:\n g.user = User.query.get(session[CURR_USER])", "def update_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the entry in the 'notifications' DB table for a notification message, when a notification arrives.
def make_notification_update(dbConn, obj): try: with dbConn: dbConn.execute("update notifications set rcv_ts=? where msg_id=?", (obj['dt'], obj['msg_id'])) except sqlite3.IntegrityError as e: log.error("Error in DB transaction when updating notification fo...
[ "def _update_notification(self, id, tenant_id, notification):\n try:\n name = notification['name']\n notification_type = notification['type'].upper()\n address = notification['address']\n self._notifications_repo.update_notification(id, tenant_id, name,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the last used uid. Corresponds to the number of current rows in users table.
def get_last_uid(cursor):
    cursor.execute('SELECT count(uid) FROM users')
    return int(cursor.fetchall()[0][0])
[ "def get_last_uid():\n collection = db.get_collection(\"users\")\n assignment_id = collection.find_one({}, {\"uid\": 1}, sort=[(\"uid\", -1)])\n return assignment_id['uid']", "def findLastUserID():\n conn = engine.connect()\n if CheckTblNameExist(\"lineuser\"):\n result_db = conn.execute(\"s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the last session id. Corresponds to the number of current rows in the sessions table.
def get_last_sid(cursor):
    cursor.execute('SELECT count(sid) FROM sessions')
    return int(cursor.fetchall()[0][0])
[ "def last(self):\n return int(self.rpc.call(MsfRpcMethod.SessionRingLast, [self.sid])['seq'])", "def session_id(self):\n try:\n return self.session['Id']\n except KeyError:\n return None", "def session_id(self):\n return self._session_id", "def get_next_id(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to scroll down and up the page
def scroll_page(self):
    scroll_down = self.driver.find_element_by_tag_name("html")
    scroll_down.send_keys(Keys.END)
    sleep(TestData.DELAY)
    scroll_down.send_keys(Keys.CONTROL + Keys.HOME)
    sleep(TestData.DELAY)
    return True
[ "def scroll_down(self):\r\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n sleep(self.wait)", "def page_up_down(*args, **kwargs):\n self.scroll = min(self._selected_item_index, last_index - self.scroll_items + 1)", "def scroll_page(self):\n\t\tself.ini...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to locate the bot image in the login page
def bot_image(self): return self.bot_image
[ "def logofinder(site):\n\t\n\tif site == \"People Per Hour\":\n\t\treturn 'static/assets/img/peopleperhour-logo2.png'\n\telif site == 'Twago':\n\t\treturn 'static/assets/img/twago logo.png'\n\telif site == 'Truelancer':\n\t\treturn 'static/assets/img/favicon/truelancerlogo.png'\n\telse:\n\t\treturn f\"did not recog...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write into the corpus file.
def corpusWriter(self):
    with open('corpus.txt', 'w') as file:
        for quote in self.quotes:
            file.write(quote + '\n')
[ "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def save(file, corpus):\n with open(file, 'w') as f_out:\n f_out.write(corpus)", "def wri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets/gets homogeneous external field and does not update vector potential.
def homogeneous_external_field(self): return self._H
[ "def force_field():\n ff = get_native_force_field('martini22')\n nter = ff.modifications['N-ter'].copy()\n nter.name = (nter.name, )\n cter = ff.modifications['C-ter'].copy()\n cter.name = (cter.name, )\n ff.modifications['N-ter'] = nter\n ff.modifications['C-ter'] = cter\n return ff", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets self.gvpei = (self.ae, self.be) + (ai, bi). To be executed in self.external_vector_potential and self.fixed_vortices setters.
def _update_gvpei(self): assert (self.ae is None) == (self.be is None) ai, bi = None, None if self.fixed_vortices is not None and self.fixed_vortices._vpi is not None: ai, bi = self.fixed_vortices._vpi.get_vec_h() assert (ai is None) == (bi is None) vpe...
[ "def VG(self, VGG, Vo, ig):\n # Set gate voltages\n return VGG*self.V.rVG[ig] + Vo*self.V.rVo[ig]", "def _ivp(self, space, initial_point, initial_tangent_vec):\n initial_tangent_vec = initial_tangent_vec / (self.n_steps - 1)\n\n next_point = initial_point + initial_tangent_vec\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets/gets external vector potential.
def external_vector_potential(self):
    assert (self.ae is None) == (self.be is None)
    if self.ae is not None:
        return self.ae, self.be
    return None
[ "def external_irregular_vector_potential(self):\n if self._vpei is not None:\n return self._vpei.get_vec_h()\n\n return None", "def set_getV(e_schot=None):\n if e_schot is None:\n get_V.call = lambda delta_ni: 0\n else:\n e_schot = np.array(e_schot)\n assert e_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets/gets external irregular vector potential
def external_irregular_vector_potential(self):
    if self._vpei is not None:
        return self._vpei.get_vec_h()
    return None
[ "def external_vector_potential(self):\n assert (self.ae is None) == (self.be is None)\n \n if self.ae is not None:\n return self.ae, self.be\n\n return None", "def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,dri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check usage of default credentials on master node
def test_001_check_default_master_node_credential_usage(self): ip = self.config.nailgun_host ssh_client = ssh.Client(ip, self.config.master.master_node_ssh_user, self.config.master.master_node_ssh_password, ...
[ "def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(usernam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if default credentials for OpenStack cluster have changed
def test_002_check_default_openstack_credential_usage(self): cluster_data = { 'password': self.config.identity.admin_password, 'username': self.config.identity.admin_username} default_data = { 'password': 'admin', 'username': 'admin'} self.verify...
[ "def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(usernam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check usage of default credentials for keystone on master node
def test_003_check_default_keystone_credential_usage(self): usr = self.config.master.keystone_user pwd = self.config.master.keystone_password url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host) try: keystone = keystoneclient(username=usr, ...
[ "def test_002_check_default_openstack_credential_usage(self):\n cluster_data = {\n 'password': self.config.identity.admin_password,\n 'username': self.config.identity.admin_username}\n\n default_data = {\n 'password': 'admin',\n 'username': 'admin'}\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to get user.id using email.
def getUserID(email):
    try:
        user = session.query(User_info).filter_by(email=email).one()
        return user.id
    except Exception as e:
        return None
[ "def get_id(self, email):\n\n query = self._db.User.select(self._db.User.c.email == email)\n query = query.with_only_columns([self._db.User.c.id_, ])\n\n record = query.execute().fetchone()\n return record[0]", "def find_user_id(email: str):\n user_id = sdk.search_users(email=email)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch the instance of tensorboard given the directory and port
def launch_tb(logdir: str = None, port: str = '7900'):
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', logdir, '--port', port])
    url = tb.launch()
    print(f'======\nLaunching tensorboard,\nDirectory: {logdir}\nPort: {port}\n======\n')
    return url
[ "def open_tensorboard(self):\n python_path = sys.executable\n option = '--logdir=' + self.instance.instance_summary_folder_path\n args_ = [python_path, tensorboard_dir(), option]\n self.open_subprocess(args_=args_, subprocess_key=\"tensorboard\")", "def launch(self):\n # Make it...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Random display of 25 fonts
def lf(): return random.sample(font_list, 25)
[ "def random_font():\n f = [\n ('微软雅黑', 30),\n ('宋体', 30),\n ('黑体', 30),\n ('Arial', 30),\n ('仿宋', 30),\n ('Castellar', 30),\n ]\n return random.choice(f)", "def randomText(self):\n source = string.ascii_letters + string.digits\n text = random.choice...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An art font that generates random fonts and random colors.
def rd(text, on_color=None, attr=None, width=80, justify="center"): rand_int = random.randint(1, len(font_list)+1) rand_color = color_dict.get(random.randint(30, 38)) rand_font = font_list[rand_int] print(f"Random font: {format(rand_font)}") f = Figlet( font=rand_font, width=width, ...
[ "def random_font():\n f = [\n ('微软雅黑', 30),\n ('宋体', 30),\n ('黑体', 30),\n ('Arial', 30),\n ('仿宋', 30),\n ('Castellar', 30),\n ]\n return random.choice(f)", "def lf():\n return random.sample(font_list, 25)", "def sample_farts(sample='Sample'):\n for fname ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the ith bit (zero-indexed).
def get_bit(num, i): return 1 if num & 1 << i else 0
[ "def getbit(n, i):\r\n return (n >> i) & 1", "def get_bit(num, i):\n return num & (1 << i) != 0", "def get_bit(number, index):\n return (number >> index) & 1", "def get_bit(cls, int_val: int, idx: int) -> int:\n return (int_val >> idx) & 0x1", "def get_bit(number, index):\n return (int(nu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the chromosome by taking the binary representation of the solution, to be turned into an 8-gene chromosome.
def setKromosom(self, x, y):
    binx = bin(x)[2:].zfill(4)
    biny = bin(y)[2:].zfill(4)
    self.kromosom = list(binx + biny)
[ "def voitto():\n oikein = (tiedot[\"leveys\"] * tiedot[\"korkeus\"])\n for y in range(tiedot[\"korkeus\"]):\n for x in range(tiedot[\"leveys\"]):\n if tila[\"kentta\"][y][x] == \"0\" or tila[\"kentta\"][y][x] == 1 or tila[\"kentta\"][y][x] == 2 or tila[\"kentta\"][y][x] == 3 or tila[\"kentta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return all the URIs that directly or indirectly share keys with the given URI
def traverse_uris(uri): seen = set() uris_to_check = [uri] while len(uris_to_check) > 0: uri = uris_to_check.pop() if uri not in seen: seen.add(uri) for key in keys_for_uri[uri]: for uri2 in uris_for_key[key]: if uri2 not in seen: ...
[ "def getKeysWithPrefixURI(self, uri):\n parsed = urllib.parse.urlparse(uri)\n container, key = osaka.utils.get_container_and_path(parsed.path)\n bucket = self.bucket(container, create=False)\n collection = bucket.objects.filter(Prefix=key)\n return [item.bucket_name + \"/\" + item...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a sort key for the given URI, based on whether it represents the primary work in the record
def uri_sort_key(uri):
    if uri.startswith('http://urn.fi/URN:NBN:fi:bib:me:'):
        priority = int(uri[-2:])  # last two digits are 00 for the primary work, 01+ for other works mentioned
    else:
        priority = -1  # higher priority for e.g. authorized agents
    return (priority, uri)
[ "def get_sort_key(self, item):\n return item.number", "def sortkey(self):\n if not hasattr(self, '_reference'):\n self.parse()\n return self._sortkey", "def sort_key(p):\n # Filename is the 0th entry in tuple\n return p[0]", "def get_sort_key(self):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the most appropriate URI from the given set of URIs
def select_uri(uris): return sorted(uris, key=uri_sort_key)[0]
[ "def uriListToMultiURI( self, uri_list ):\n if not uri_list:\n return []\n\n id_list = self.extractIds( uri_list )\n if not id_list:\n return []\n\n ( namespace_list, model, action, _, _ ) = self.split( uri_list[0] )\n return self.build( namespace_list, model, action, id_list, True )", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return user details from Kakao account
def get_user_details(self, response): kaccount_email = "" kakao_account = response.get("kakao_account", "") if kakao_account: kaccount_email = kakao_account.get("email", "") properties = response.get("properties", "") nickname = properties.get("nickname") if properti...
[ "def user_data(self, access_token, *args, **kwargs):\r\n data = {'method': 'users.getInfo', 'session_key': access_token}\r\n return mailru_api(data)[0]", "def user_data(self, access_token, *args, **kwargs):\n data = {'method': 'users.getInfo', 'session_key': access_token}\n return mail...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publishes freespace (as measured by e.g. sonar).
def send_free_space(self, distance): self.client.publish('free_space', str(distance))
[ "def collect_free_space():\n temp_file = \"/tmp/freespace.log\"\n os.system('df -h / > %s' % temp_file)\n file_desc = open(temp_file, 'r')\n free = file_desc.readlines()\n file_desc.close()\n free = ''.join(free)\n return free[:-1].replace(\"\\n\", \"<br/>\")", "def usedspace(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch data from RRD archive for given period of time.
def _fetch_data(self, rrdObject, startTime, endTime): #print rrdObject if not path.exists(rrdObject): raise Exception("File not exists: %s" % rrdObject) #print "%s - %s" % (startTime, endTime) rrd_data = None try: rrd_data = rrdtool.fetch(str(rrdObjec...
[ "def fetch_daily_data():\n soup = read_with_cache(RANKING_BASEURL)\n dbwrite_animation(soup)\n images_for_date(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())[0:10])", "def retrieve_radar(product=None, data_time=None, forecast_time=0, dest=None, file_format=None):\n content = getDWDRadar()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download genotype data and save the output in the .data dir.
def download_genotype_data(): print("downloading genotype data") download_from_url(PSAM_PATH, dst=f"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam", desc="downloading psam") download_from_url(PVAR_PATH, dst=f"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst", desc="downloading pv...
[ "def download_proteome(proteome_id, data_dir, domain=\"Eukaryota\"):\n base = (\"ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/\"\n \"knowledgebase/reference_proteomes\")\n\n url = [base, domain, proteome_id + \".fasta.gz\"]\n outfile = os.path.join(data_dir, proteome_id + \".fasta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a merged genotype file from psam, pvar, and pgen.
def create_merged_genotype_file(snps_file_path): print("creating merged genotype file") plink_runner = Plink2DockerRunner() shutil.copyfile(snps_file_path, f"{GENOTYPE_DATA_PATH}/{SNP_LIST_FILE_NAME}") plink_runner(f"./plink2 --pfile {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_GENOTYPE_FILE...
[ "def generate_ped(g_mat, map_order):\n ped_file = []\n # Append the header\n ped_file.append([' # fjFile = GENOTYPE'])\n # Append the names of the SNPs\n ped_file.append([''] + map_order)\n for sample in sorted(g_mat):\n # Use missing codes for the family ID and the maternal and paternal ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialise the vectors, their size, and randomly allocated centroids.
def initialize(self):
    self.SIZE = self.vectors.shape[0]
    # todo: can use max distance to allocate farthest-apart points
    self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]
[ "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and update clusters until max iterations are reached or the change rate drops.
def create_clusters(self): ex = 0 print 'Iter - Purity Gini Index' while ex < self.MAX_ITERATION: new_clusters = np.zeros(self.centroids.shape) distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1) for i in range(self.K):...
[ "def update_clusters(self, max_iter=20):\n rep_data = self.get_reps()\n # Lazily allocate array for centroids\n if self.centroids is None:\n self.centroids = np.zeros([self.num_classes * self.k, rep_data.shape[1]])\n\n for c in range(self.num_classes):\n\n class_mas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The action controls the robot using mocaps. Specifically, bodies on the robot (for example the gripper wrist) are controlled with mocap bodies. In this case the action is the desired difference in position and orientation (quaternion), in world coordinates, of the target body. The mocap is positioned relative to ...
def mocap_set_action(self, action): # @Melissa: Action = 3DOF Cartesian Position Delta + Quaternion if self.sim.model.nmocap > 0: action, _ = np.split(action, (self.sim.model.nmocap * 7, )) action = action.reshape(self.sim.model.nmocap, 7) pos_delta = action[:, :3] ...
[ "def mocap_set_action(sim, action):\n if sim.model.nmocap > 0:\n action, _ = np.split(action, (sim.model.nmocap * 7, ))\n action = action.reshape(sim.model.nmocap, 7)\n # print(action)\n\n pos_delta = action[:, :3]\n quat_delta = action[:, 3:]\n\n reset_mocap2body_xpos(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the mocap welds that we use for actuation.
def reset_mocap_welds(self): if self.sim.model.nmocap > 0 and self.sim.model.eq_data is not None: for i in range(self.sim.model.eq_data.shape[0]): if self.sim.model.eq_type[i] == mujoco_py.const.EQ_WELD: self.sim.model.eq_data[i, :] = np.array( ...
[ "def reset_mocap_welds(sim):\n if sim.model.nmocap > 0 and sim.model.eq_data is not None:\n for i in range(sim.model.eq_data.shape[0]):\n if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:\n sim.model.eq_data[i, :] = np.array(\n [0., 0., 0., 1., 0., 0., 0.])\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the position and orientation of the mocap bodies to the same values as the bodies they're welded to.
def reset_mocap2body_xpos(self): if (self.sim.model.eq_type is None or self.sim.model.eq_obj1id is None or self.sim.model.eq_obj2id is None): return for eq_type, obj1_id, obj2_id in zip(self.sim.model.eq_type, self.sim.mode...
[ "def reset_mocap2body_xpos(sim):\n\n if (sim.model.eq_type is None or\n sim.model.eq_obj1id is None or\n sim.model.eq_obj2id is None):\n return\n for eq_type, obj1_id, obj2_id in zip(sim.model.eq_type,\n sim.model.eq_obj1id,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the territory selection phase; runs until all of the territories in the game world are selected.
def init_territory_selection_phase(self): phase_name = "Territory Selection Phase!\n\n" selected_territories = 0 while selected_territories < len(self.world.territories): for i, player in enumerate(self.players): complain = "" selected_territory = None...
[ "def initRegion(self):\n pass", "def _init_subset_loader(self):\n # All strategies start with random selection\n self.subset_indices, self.subset_weights = self._init_subset_indices()\n self._refresh_subset_loader()", "def select_finish(self):\n if not self.game.is_ghost_build...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the epipolar lines in two images given a set of point correspondences.
def find_epilines(imgLeft, imgRight, ptsLeft, ptsRight, F): color = [] for i in range(ptsLeft.shape[0]): color.append(tuple(np.random.randint(0, 255, 3).tolist())) print(color) # Find epilines corresponding to points in right image (right image) linesLeft = cv2.computeCorrespondEpilines(pts...
[ "def plot_epipolar_line(img1, img2, x1, x2, F1, F2, imgName, best_num_inlier, num_lines=30):\r\n\tif num_lines > best_num_inlier:\r\n\t\tnum_lines = best_num_inlier\r\n\r\n\tdef draw_lines(img, ax, pts, x, y, title):\r\n\t\tax.axis('off'), ax.imshow(img), ax.set_title(title)\r\n\t\tfor idx in range(num_lines):\r\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Estimate the p_observations_given_state matrix for a set of observations. If observations is a list/array of length N, returns an array of shape (N, S), where element [t, s] is the probability of the observation at time t assuming the system was in fact in state s.
def __call__(self, observations): observations = numpy.asarray(observations) if self.continuous: state_probabilities = [kde(observations) for kde in self.state_distributions] else: state_probabilities = [hist[observations] for hist in self.state_distributions] ret...
[ "def observation_from_state(self, state):\n state_index = self.latent_variable_markov_chain.index_dict[state]\n return np.random.choice(self.observation_states,\n p=self.emission_probabilities[state_index, :])", "def calc_probas_for_state(self, state, remembering, day, obs):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a set of state sequences, estimate the initial and transition probabilities for each state (i.e. the p_initial and p_transition matrices needed for HMM inference).
def estimate_hmm_params(state_sequences, pseudocount=1, moving=True, time_sigma=1): state_sequences = numpy.asarray(state_sequences) n, t = state_sequences.shape s = state_sequences.max() + 1 # number of states initial_counts = numpy.bincount(state_sequences[:,0], minlength=s) + pseudocount p_initia...
[ "def transition_probabilities(match_pos, states_dict):\r\n tran_p_dict = {} # construting HMM dictionary of dictionaries\r\n tran_p_dict['M'] = {}; tran_p_dict['I'] = {}; tran_p_dict['D'] = {}\r\n for key in tran_p_dict:\r\n for i in range(len(match_pos)+1):\r\n if i == 0:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The bit position getter.
def bit_pos(self): return self.byte_ptr * 8 + self.bit_ptr
[ "def get_bit_position(self) -> int:\n\t\tassert 0 <= self._num_bits_remaining <= 7, \"Unreachable state\"\n\t\treturn -self._num_bits_remaining % 8", "def getBit(self, bitVal, position):", "def __getpos__(self, num):\n return self.num_to_pos[num]", "def get_bit(number, position):\n if position < 0 o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The bit position setter.
def bit_pos(self, bits):
    if bits > len(self):
        raise BitReaderError('bit_pos(%s) is out of boundary', bits)
    self.byte_ptr, self.bit_ptr = divmod(bits, 8)
[ "def _set_bit_position(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"bit-position\", parent=self, path_helper=self._path_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read bit_length bits as an integer.
def read(self, bit_length): ret = self.peek(bit_length) self.bit_pos += bit_length return ret
[ "def read_integer(self, number_of_bits):\n\n value = 0\n\n for _ in range(number_of_bits):\n value <<= 1\n value |= self.read_bit()\n\n return value", "def extract_bits(data, bit, length=1):\n bits = bitarray(data, endian='big')\n if length > 1:\n out = bits...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add image to a webfacet.
def upload_webfacet_image(request): if request.method == 'POST': imageform=ImageAssetForm(request.POST, request.FILES) if imageform.is_valid(): webimage = imageform.save(commit=False) # retrieve the webfacet the image should be associated with webfacet_id ...
[ "def add_webfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n webfacet_id = request.POST.get('webfacet')\r\n print \"WEBFACETid: \", webfacet_id\r\n web...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add image to an audiofacet.
def upload_audiofacet_image(request): if request.method == 'POST': imageform=ImageAssetForm(request.POST, request.FILES) if imageform.is_valid(): audioimage = imageform.save(commit=False) # retrieve the audiofacet the image should be associated with audiof...
[ "def add_audiofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n audiofacet_id = request.POST.get('audiofacet')\r\n print \"audioFACETid: \", audiofacet_id\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add image to a videofacet.
def upload_videofacet_image(request): if request.method == 'POST': imageform=ImageAssetForm(request.POST, request.FILES) if imageform.is_valid(): videoimage = imageform.save(commit=False) # retrieve the videofacet the image should be associated with videof...
[ "def add_video(self, video):\n self._video_list.append(video)", "def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.img_lst.append(image)", "async def set_img(self, ctx, *,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add existing image(s) in the library to another webfacet.
def add_webfacet_image(request): if request.method == "POST": add_image_form = AddImageForm(request.POST, request=request) if add_image_form.is_valid(): webfacet_id = request.POST.get('webfacet') print "WEBFACETid: ", webfacet_id webfacet = get_object_or_4...
[ "def add_printfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n printfacet_id = request.POST.get('printfacet')\r\n print \"printFACETid: \", printfacet_id\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add existing image(s) in the library to another printfacet.
def add_printfacet_image(request): if request.method == "POST": add_image_form = AddImageForm(request.POST, request=request) if add_image_form.is_valid(): printfacet_id = request.POST.get('printfacet') print "printFACETid: ", printfacet_id printfacet = get...
[ "def _merge(self, other):\n self.plots += other.plots\n return self.plots", "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def addText...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add existing image(s) in the library to another audiofacet.
def add_audiofacet_image(request): if request.method == "POST": add_image_form = AddImageForm(request.POST, request=request) if add_image_form.is_valid(): audiofacet_id = request.POST.get('audiofacet') print "audioFACETid: ", audiofacet_id audiofacet = get...
[ "def addImage(self, *images):\n for img in images:\n if self.image is None:\n self.image = img\n else:\n self.image.inplaceAdd(img)", "def upload_audiofacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(reques...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given list of ``Tag`` instances, creates a string representation of the list suitable for editing by the user, such that submitting the given string representation back without changing it will give the same list of tags. Tag names which contain DELIMITER will be double quoted. Adapted from Taggit's _edit_string_for_ta...
def join_tags(tags): names = [] delimiter = settings.TAGGIT_SELECTIZE['DELIMITER'] for tag in tags: name = tag.name if delimiter in name or ' ' in name: names.append('"%s"' % name) else: names.append(name) return delimiter.join(sorted(names))
[ "def edit_string_for_tags(tags):\r\n names = []\r\n use_commas = False\r\n for tag in tags:\r\n name = tag.name\r\n if u',' in name:\r\n names.append('\"%s\"' % name)\r\n continue\r\n elif u' ' in name:\r\n if not use_commas:\r\n use_comm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders a form with AMOUNT_OF_COURSES text boxes for entering courses to schedule (form action=/schedules, method=POST).
def how_many_post(): default_courses = ['CS 442', 'CS 392', 'CS 519', 'MA 331'] resp = make_response(render_template( "sched_entry.html", quantity=AMOUNT_OF_COURSES, title='Scheduler', default_vals=default_courses)) resp.set_cookie('course_combos', '', expires=0) return r...
[ "def my_form_post():\n text_list = []\n #make list of form inputs\n for i in range(1, AMOUNT_OF_COURSES + 1):\n form_num = 'text' + str(i)\n text_list.append(request.form[form_num])\n #remove items with no input, generate string of courses\n final_list = []\n for text in text_list:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets input from the form, puts it in a list, computes the schedules, and sends JSON of the course combinations to /sched as a cookie.
def my_form_post(): text_list = [] #make list of form inputs for i in range(1, AMOUNT_OF_COURSES + 1): form_num = 'text' + str(i) text_list.append(request.form[form_num]) #remove items with no input, generate string of courses final_list = [] for text in text_list: if not...
[ "def schedule(request):\n form = None\n \n #If GET is not empty (ie, if the user has searched for something), use those search parameters to\n # populate form and get search results\n #If they have not, then populate search form based on initial values\n # (TODO -- ask Tim why 'next' is in url aft...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upon a GET request containing comma-separated course names in a query string, finds the combos and sends them as JSON.
def getCombosAPI(): all_args = request.args.lists() course_list = all_args[0][1][0].split(",") u_COURSE_LIST = map((lambda x: x.upper()), course_list)#make all caps just in case COURSE_LIST = map( str, u_COURSE_LIST)#unicode list -> list of python strs combos = scheduler.schedule(COURSE_LIST) re...
[ "def get_course_lecturers(request_json):\n # get the parameters set by user input\n parameters = request_json[\"queryResult\"][\"parameters\"]\n \n courses_parameter = parameters.get(\"courses\", None)\n\n if courses_parameter is None:\n return {\"fulfillmentText\": \"No courses specified\"}\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the set of combos for the current page
def getCombosForPage(page_num, per_page, count_of_combos, combos): combos_start = (per_page * (page_num - 1)) + 1 combos_end = combos_start + per_page these_combos = {} for key in range(combos_start, combos_end): try: # if new dict is not an int schedules are not sorted on the page ...
[ "def combos():\n print 'Loading combo info page'\n\n test_data_folder = os.path.join('data', 'testdata')\n base_file_name = 'CU-PENN.dvw'\n base_file_key = os.path.join(test_data_folder, base_file_name)\n\n parser = Parser(base_file_key)\n combo_list = parser.read_combos()\n\n combo_dicts = [{'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Only a limited number of items appears in the feed.
def test_limit_items(self): AnnouncementFactory( title="Not going to be there", expires_at=timezone.now() - datetime.timedelta(days=1), ) for i in range(5): AnnouncementFactory() response = self.get("announcements:feed") assert "Not going to ...
[ "def number_of_items(self):", "def limit(requestContext, seriesList, n):\n return seriesList[0:n]", "def limit(self, count):\n self._params.update(limit=count)\n return self", "def limit(self, count):\n self._limit = count\n return self", "def test_limit(self) -> None:\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the mandatory services.
def check_services(self): for service in self.services: try: self.cloud.search_services(service)[0] except Exception: # pylint: disable=broad-except self.is_skipped = True break
[ "def cmd_all_service_check(self, arg):\n print(f'Web service is {\"\" if server_app.check_service() else \"not\"} running.')\n print(f'Database service is {\"\" if database.check_service() else \"not\"} running.')", "def check_services_ready(self, services):\n for ser in services:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the mandatory network extensions.
def check_extensions(self): extensions = self.cloud.get_network_extensions() for network_extension in self.neutron_extensions: if network_extension not in extensions: LOGGER.warning( "Cannot find Neutron extension: %s", network_extension) s...
[ "def _validate(extensions):\n validate_no_empty_strings(extensions)\n validate_list_not_empty(extensions)\n return True", "def _has_valid_extensions(ad):\n return len(ad) == 1", "def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }