code (string)
signature (string)
docstring (string)
loss_without_docstring (float64)
loss_with_docstring (float64)
factor (float64)
# init vertex and triangle buffers vertices = [] triangles = [] vertex_indices = -1 * np.ones([self.height, self.width]).astype(np.int32) for i in range(self.height-1): for j in range(self.width-1): # read corners of square v0 = self.data[i,j,:] v1 = self.data[i,j+1,:] v2 = self.data[i+1,j,:] v3 = self.data[i+1,j+1,:] # check distances d01 = np.abs(v0[2] - v1[2]) d02 = np.abs(v0[2] - v2[2]) d03 = np.abs(v0[2] - v3[2]) d13 = np.abs(v1[2] - v3[2]) d23 = np.abs(v2[2] - v3[2]) # add tri 1 if max(d01, d03, d13) < dist_thresh: # add vertices if vertex_indices[i,j] == -1: vertices.append(v0) vertex_indices[i,j] = len(vertices)-1 if vertex_indices[i,j+1] == -1: vertices.append(v1) vertex_indices[i,j+1] = len(vertices)-1 if vertex_indices[i+1,j+1] == -1: vertices.append(v3) vertex_indices[i+1,j+1] = len(vertices)-1 # add tri i0 = vertex_indices[i,j] i1 = vertex_indices[i,j+1] i3 = vertex_indices[i+1,j+1] triangles.append([i0, i1, i3]) # add tri 2 if max(d01, d03, d23) < dist_thresh: # add vertices if vertex_indices[i,j] == -1: vertices.append(v0) vertex_indices[i,j] = len(vertices)-1 if vertex_indices[i+1,j] == -1: vertices.append(v2) vertex_indices[i+1,j] = len(vertices)-1 if vertex_indices[i+1,j+1] == -1: vertices.append(v3) vertex_indices[i+1,j+1] = len(vertices)-1 # add tri i0 = vertex_indices[i,j] i2 = vertex_indices[i+1,j] i3 = vertex_indices[i+1,j+1] triangles.append([i0, i3, i2]) # return trimesh import trimesh mesh = trimesh.Trimesh(vertices, triangles) return mesh
def to_mesh(self, dist_thresh=0.01)
Convert the point cloud to a mesh. Returns ------- :obj:`trimesh.Trimesh` mesh of the point cloud
1.500847
1.514481
0.990998
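A brief usage sketch for the to_mesh entry above; `pc_im` is an assumed PointCloudImage instance (not constructed in these entries) and the meters interpretation of the depth channel is an assumption.
    mesh = pc_im.to_mesh(dist_thresh=0.01)   # drop triangles spanning depth jumps larger than the threshold
    mesh.export('cloud.obj')                 # trimesh.Trimesh supports export by file extension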
return PointCloud( data=self._data.reshape( self.height * self.width, 3).T, frame=self._frame)
def to_point_cloud(self)
Convert the image to a PointCloud object. Returns ------- :obj:`autolab_core.PointCloud` The corresponding PointCloud.
6.627136
8.016813
0.826655
# compute direction via cross product of derivatives gy = cv2.Sobel(self.data, cv2.CV_64F, 1, 0, ksize=ksize) gx = cv2.Sobel(self.data, cv2.CV_64F, 0, 1, ksize=ksize) gx_data = gx.reshape(self.height * self.width, 3) gy_data = gy.reshape(self.height * self.width, 3) pc_grads = np.cross(gx_data, gy_data) # default to point toward camera # normalize pc_grad_norms = np.linalg.norm(pc_grads, axis=1) pc_grads[pc_grad_norms > 0] = pc_grads[pc_grad_norms > 0] / np.tile(pc_grad_norms[pc_grad_norms > 0, np.newaxis], [1, 3]) pc_grads[pc_grad_norms == 0.0] = np.array([0,0,-1.0]) # zero norm means pointing toward camera # reshape normal_im_data = pc_grads.reshape(self.height, self.width, 3) # preserve zeros zero_px = self.zero_pixels() normal_im_data[zero_px[:,0], zero_px[:,1], :] = np.zeros(3) return NormalCloudImage(normal_im_data, frame=self.frame)
def normal_cloud_im(self, ksize=3)
Generate a NormalCloudImage from the PointCloudImage using Sobel filtering. Parameters ---------- ksize : int Size of the kernel to use for derivative computation Returns ------- :obj:`NormalCloudImage` The corresponding NormalCloudImage.
2.638828
2.662775
0.991007
data = Image.load_data(filename) return PointCloudImage(data, frame)
def open(filename, frame='unspecified')
Creates a PointCloudImage from a file. Parameters ---------- filename : :obj:`str` The file to load the data from. Must be one of .png, .jpg, .npy, or .npz. frame : :obj:`str` A string representing the frame of reference in which the new image lies. Returns ------- :obj:`PointCloudImage` The new PointCloudImage.
14.394707
11.006871
1.307793
return NormalCloud( data=self._data.reshape( self.height * self.width, 3).T, frame=self._frame)
def to_normal_cloud(self)
Convert the image to a NormalCloud object. Returns ------- :obj:`autolab_core.NormalCloud` The corresponding NormalCloud.
7.050139
8.57524
0.822151
data = Image.load_data(filename) return NormalCloudImage(data, frame)
def open(filename, frame='unspecified')
Creates a NormalCloudImage from a file. Parameters ---------- filename : :obj:`str` The file to load the data from. Must be one of .png, .jpg, .npy, or .npz. frame : :obj:`str` A string representing the frame of reference in which the new image lies. Returns ------- :obj:`NormalCloudImage` The new NormalCloudImage.
20.21155
10.772474
1.876222
S = np.array([[self._plane_width / self._vol_width, 0, 0], [0, self._plane_height / self._vol_height, 0], [0, 0, self._depth_scale / self._vol_depth]]) return S
def S(self)
:obj:`numpy.ndarray` : The 3x3 scaling matrix for this projection
3.776932
2.928201
1.289847
t = np.array([self._plane_width / 2, self._plane_height / 2, self._depth_scale / 2]) return t
def t(self)
:obj:`numpy.ndarray` : The 3x1 translation matrix for this projection
6.22788
5.393569
1.154686
P = np.r_[np.c_[self.S, self.t], np.array([0,0,0,1])] return P
def P(self)
:obj:`numpy.ndarray` : The 4x4 projection matrix for this camera.
5.86341
3.58762
1.634345
if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3): raise ValueError('Must provide PointCloud or 3D Point object for projection') if point_cloud.frame != self._frame: raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame)) points_proj = self.S.dot(point_cloud.data) + self.t if len(points_proj.shape) == 1: points_proj = points_proj[:, np.newaxis] point_depths = points_proj[2,:] point_z = np.tile(point_depths, [3, 1]) points_proj = np.divide(points_proj, point_z) if round_px: points_proj = np.round(points_proj) points_proj = points_proj[:2,:].astype(np.int16) valid_ind = np.where((points_proj[0,:] >= 0) & \ (points_proj[1,:] >= 0) & \ (points_proj[0,:] < self.width) & \ (points_proj[1,:] < self.height))[0] depth_data = np.zeros([self.height, self.width]) depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind] return DepthImage(depth_data, frame=self.frame)
def project_to_image(self, point_cloud, round_px=True)
Projects a point cloud onto the camera image plane and creates a depth image. Zero depth means no point projected into the camera at that pixel location (i.e. infinite depth). Parameters ---------- point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point` A PointCloud or Point to project onto the camera image plane. round_px : bool If True, projections are rounded to the nearest pixel. Returns ------- :obj:`DepthImage` A DepthImage generated from projecting the point cloud into the camera. Raises ------ ValueError If the input is not a PointCloud or Point in the same reference frame as the camera.
2.337681
2.216372
1.054733
# check valid input if not isinstance(depth_image, DepthImage): raise ValueError('Must provide DepthImage object for projection') if depth_image.frame != self._frame: raise ValueError('Cannot deproject points in frame %s from camera with frame %s' %(depth_image.frame, self._frame)) # create homogeneous pixels row_indices = np.arange(depth_image.height) col_indices = np.arange(depth_image.width) pixel_grid = np.meshgrid(col_indices, row_indices) pixels = np.c_[pixel_grid[0].flatten(), pixel_grid[1].flatten()].T depth_data = depth_image.data.flatten() pixels_homog = np.r_[pixels, depth_data.reshape(1, depth_data.shape[0])] # deproject points_3d = np.linalg.inv(self.S).dot(pixels_homog - np.tile(self.t.reshape(3,1), [1, pixels_homog.shape[1]])) return PointCloud(data=points_3d, frame=self._frame)
def deproject(self, depth_image)
Deprojects a DepthImage into a PointCloud. Parameters ---------- depth_image : :obj:`DepthImage` The 2D depth image to project into a point cloud. Returns ------- :obj:`autolab_core.PointCloud` A 3D point cloud created from the depth image. Raises ------ ValueError If depth_image is not a valid DepthImage in the same reference frame as the camera.
2.795099
2.734728
1.022076
if not isinstance(pixel, Point) or not pixel.dim == 2: raise ValueError('Must provide 2D Point object for pixel projection') if pixel.frame != self._frame: raise ValueError('Cannot deproject pixel in frame %s from camera with frame %s' %(pixel.frame, self._frame)) point = np.r_[pixel.data, depth] point_3d = np.linalg.inv(self.S).dot(point - self.t) return Point(data=point_3d, frame=self._frame)
def deproject_pixel(self, depth, pixel)
Deprojects a single pixel with a given depth into a 3D point. Parameters ---------- depth : float The depth value at the given pixel location. pixel : :obj:`autolab_core.Point` A 2D point representing the pixel's location in the camera image. Returns ------- :obj:`autolab_core.Point` The projected 3D point. Raises ------ ValueError If pixel is not a valid 2D Point in the same reference frame as the camera.
4.061727
3.809791
1.066129
file_root, file_ext = os.path.splitext(filename) if file_ext.lower() != INTR_EXTENSION: raise ValueError('Extension %s not supported for OrthographicIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION)) camera_intr_dict = copy.deepcopy(self.__dict__) f = open(filename, 'w') json.dump(camera_intr_dict, f) f.close()
def save(self, filename)
Save the CameraIntrinsics object to a .intr file. Parameters ---------- filename : :obj:`str` The .intr file to save the object to. Raises ------ ValueError If filename does not have the .intr extension.
4.35578
4.00884
1.086544
file_root, file_ext = os.path.splitext(filename) if file_ext.lower() != INTR_EXTENSION: raise ValueError('Extension %s not supported for CameraIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION)) f = open(filename, 'r') ci = json.load(f) f.close() return OrthographicIntrinsics(frame=ci['_frame'], vol_height=ci['_vol_height'], vol_width=ci['_vol_width'], vol_depth=ci['_vol_depth'], plane_height=ci['_plane_height'], plane_width=ci['_plane_width'], depth_scale=ci['_depth_scale'])
def load(filename)
Load a CameraIntrinsics object from a file. Parameters ---------- filename : :obj:`str` The .intr file to load the object from. Returns ------- :obj:`CameraIntrinsics` The CameraIntrinsics object loaded from the file. Raises ------ ValueError If filename does not have the .intr extension.
3.575845
3.398311
1.052242
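A hedged round-trip sketch for the orthographic-camera entries above; the constructor keywords are inferred from the load() body, and all numeric values and the '.intr' extension are assumptions.
    intr = OrthographicIntrinsics(frame='world', vol_height=1.0, vol_width=1.0, vol_depth=1.0,
                                  plane_height=480, plane_width=640, depth_scale=1.0)  # assumed constructor
    depth_im = intr.project_to_image(point_cloud)   # point_cloud must share frame 'world'
    recovered = intr.deproject(depth_im)            # PointCloud with one point per pixel
    intr.save('ortho_cam.intr')                     # assumes INTR_EXTENSION == '.intr'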
if rospy.get_name() == '/unnamed': raise ValueError('PhoXi sensor must be run inside a ros node!') # Connect to the cameras if not self._connect_to_sensor(): self._running = False return False # Set up subscribers for camera data self._color_im_sub = rospy.Subscriber('/phoxi_camera/texture', ImageMessage, self._color_im_callback) self._depth_im_sub = rospy.Subscriber('/phoxi_camera/depth_map', ImageMessage, self._depth_im_callback) self._normal_map_sub = rospy.Subscriber('/phoxi_camera/normal_map', ImageMessage, self._normal_map_callback) self._running = True return True
def start(self)
Start the sensor.
3.896997
3.738298
1.042452
# Check that everything is running if not self._running: logging.warning('PhoXi not running. Aborting stop') return False # Stop the subscribers self._color_im_sub.unregister() self._depth_im_sub.unregister() self._normal_map_sub.unregister() # Disconnect from the camera rospy.ServiceProxy('phoxi_camera/disconnect_camera', Empty)() self._running = False return True
def stop(self)
Stop the sensor.
5.50766
5.413852
1.017327
# Run a software trigger times = [] rospy.ServiceProxy('phoxi_camera/start_acquisition', Empty)() rospy.ServiceProxy('phoxi_camera/trigger_image', TriggerImage)() self._cur_color_im = None self._cur_depth_im = None self._cur_normal_map = None rospy.ServiceProxy('phoxi_camera/get_frame', GetFrame)(-1) max_time = 5.0 time_waiting = 0.0 while self._cur_color_im is None or self._cur_depth_im is None or self._cur_normal_map is None: time.sleep(0.05) time_waiting += 0.05 if time_waiting > max_time: raise SensorUnresponsiveException('PhoXi sensor seems to be non-responsive') return self._cur_color_im, self._cur_depth_im, None
def frames(self)
Retrieve a new frame from the PhoXi and convert it to a ColorImage, a DepthImage, and an IrImage. Returns ------- :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray` The ColorImage, DepthImage, and IrImage of the current frame.
3.595632
3.311737
1.085724
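A minimal sketch of the start/frames/stop lifecycle shown in the PhoXi entries above; the sensor class name, constructor arguments, and device name are assumptions not given in these entries.
    sensor = PhoXiSensor(frame='phoxi', device_name='placeholder-serial')  # hypothetical
    if sensor.start():                      # requires a running ROS node and the phoxi_camera services
        color_im, depth_im, _ = sensor.frames()
        sensor.stop()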
name = self._device_name try: # Check if device is actively in list rospy.wait_for_service('phoxi_camera/get_device_list') device_list = rospy.ServiceProxy('phoxi_camera/get_device_list', GetDeviceList)().out if not str(name) in device_list: logging.error('PhoXi sensor {} not in list of active devices'.format(name)) return False success = rospy.ServiceProxy('phoxi_camera/connect_camera', ConnectCamera)(name).success if not success: logging.error('Could not connect to PhoXi sensor {}'.format(name)) return False logging.debug('Connected to PhoXi Sensor {}'.format(name)) return True except rospy.ServiceException as e: logging.error('Service call failed: {}'.format(e)) return False
def _connect_to_sensor(self)
Connect to the sensor.
3.003036
2.93512
1.023139
try: data = self._bridge.imgmsg_to_cv2(msg) if np.max(data) > 255.0: data = 255.0 * data / 1200.0 # Experimentally set value for white data = np.clip(data, 0., 255.0).astype(np.uint8) gsimage = GrayscaleImage(data, frame=self._frame) self._cur_color_im = gsimage.to_color() except: self._cur_color_im = None
def _color_im_callback(self, msg)
Callback for handling textures (greyscale images).
3.924759
3.959711
0.991173
try: self._cur_depth_im = DepthImage(self._bridge.imgmsg_to_cv2(msg) / 1000.0, frame=self._frame) except: self._cur_depth_im = None
def _depth_im_callback(self, msg)
Callback for handling depth images.
3.333814
3.417729
0.975447
try: self._cur_normal_map = self._bridge.imgmsg_to_cv2(msg) except: self._cur_normal_map = None
def _normal_map_callback(self, msg)
Callback for handling normal maps.
3.678282
3.709496
0.991585
self._sensor = cv2.VideoCapture(self._device_id) if not self._sensor.isOpened(): raise Exception("Unable to open OpenCVCameraSensor for id {0}".format(self._device_id)) self.flush()
def start(self)
Starts the OpenCVCameraSensor stream. Raises: Exception if unable to open the stream
5.256485
2.882249
1.823744
self.flush() ret_val, frame = self._sensor.read() if not ret_val: raise Exception("Unable to retrieve frame from OpenCVCameraSensor for id {0}".format(self._device_id)) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if self._upside_down: frame = np.flipud(frame).astype(np.uint8) frame = np.fliplr(frame).astype(np.uint8) return ColorImage(frame)
def frames(self, flush=True)
Returns the latest color image from the stream. Raises: Exception if the OpenCV sensor gives a ret_val of 0
3.610612
3.16076
1.142324
if render_mode == RenderMode.SEGMASK: return self.query_im elif render_mode == RenderMode.COLOR: return self.color_im elif render_mode == RenderMode.DEPTH: return self.depth_im else: raise ValueError('Render mode %s not supported' %(render_mode))
def image(self, render_mode)
Get the image associated with a particular render mode
2.979447
3.091282
0.963823
# read params foreground_mask_tolerance = cfg['foreground_mask_tolerance'] min_contour_area = cfg['min_contour_area'] max_contour_area = cfg['max_contour_area'] w = cfg['filter_dim'] # mask image using background detection bgmodel = color_im.background_model() binary_im = color_im.foreground_mask(foreground_mask_tolerance, bgmodel=bgmodel) # filter the image y, x = np.ogrid[-w/2+1:w/2+1, -w/2+1:w/2+1] mask = x*x + y*y <= w/2*w/2 filter_struct = np.zeros([w,w]).astype(np.uint8) filter_struct[mask] = 1 binary_im_filtered = binary_im.apply(snm.grey_closing, structure=filter_struct) visualize = False if visualize: plt.figure() plt.imshow(binary_im_filtered.data, cmap=plt.cm.gray) plt.axis('off') plt.show() # find all contours contours = binary_im_filtered.find_contours(min_area=min_contour_area, max_area=max_contour_area) # convert contours to detections detections = [] for contour in contours: box = contour.bounding_box color_thumbnail = color_im.crop(box.height, box.width, box.ci, box.cj) depth_thumbnail = depth_im.crop(box.height, box.width, box.ci, box.cj) binary_thumbnail = binary_im_filtered.crop(box.height, box.width, box.ci, box.cj) thumbnail_intr = camera_intr if camera_intr is not None: thumbnail_intr = camera_intr.crop(box.height, box.width, box.ci, box.cj) detections.append(RgbdDetection(color_thumbnail, depth_thumbnail, box, binary_thumbnail=binary_thumbnail, contour=contour, camera_intr=thumbnail_intr)) return detections
def detect(self, color_im, depth_im, cfg, camera_intr=None, T_camera_world=None, segmask=None)
Detects all relevant objects in an rgbd image pair using foreground masking. Parameters ---------- color_im : :obj:`ColorImage` color image for detection depth_im : :obj:`DepthImage` depth image for detection (corresponds to color image) cfg : :obj:`YamlConfig` parameters of detection function camera_intr : :obj:`CameraIntrinsics` intrinsics of the camera T_camera_world : :obj:`autolab_core.RigidTransform` registration of the camera to world frame segmask : :obj:`BinaryImage` optional segmask of invalid pixels Returns ------ :obj:`list` of :obj:`RgbdDetection` all detections in the image
2.484787
2.435999
1.020028
if detector_type == 'point_cloud_box': return PointCloudBoxDetector() elif detector_type == 'rgbd_foreground_mask_query': return RgbdForegroundMaskQueryImageDetector() elif detector_type == 'rgbd_foreground_mask': return RgbdForegroundMaskDetector() raise ValueError('Detector type %s not understood' %(detector_type))
def detector(detector_type)
Returns a detector of the specified type.
3.621574
3.638063
0.995468
c_i = input.get_shape()[-1] assert c_i%group==0 assert c_o%group==0 convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding) if group==1: conv = convolve(input, kernel) else: input_groups = tf.split(input, group, axis=3) kernel_groups = tf.split(kernel, group, axis=3) output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)] conv = tf.concat(output_groups, axis=3) return tf.reshape(tf.nn.bias_add(conv, biases), [-1]+conv.get_shape().as_list()[1:])
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1)
Convolution layer helper function From https://github.com/ethereon/caffe-tensorflow
1.322246
1.335562
0.990029
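A short TF1-style sketch of the grouped conv() helper above; the tensor shapes are illustrative assumptions (96 input channels split into 2 groups of 48).
    import tensorflow as tf
    x = tf.placeholder(tf.float32, [1, 27, 27, 96])
    k = tf.Variable(tf.random_normal([5, 5, 48, 256]))  # kernel depth = c_i / group
    b = tf.Variable(tf.zeros([256]))
    out = conv(x, k, b, k_h=5, k_w=5, c_o=256, s_h=1, s_w=1, padding="SAME", group=2)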
self._batch_size = config['batch_size'] self._im_height = config['im_height'] self._im_width = config['im_width'] self._num_channels = config['channels'] self._output_layer = config['out_layer'] self._feature_layer = config['feature_layer'] self._out_size = None if 'out_size' in config.keys(): self._out_size = config['out_size'] self._input_arr = np.zeros([self._batch_size, self._im_height, self._im_width, self._num_channels]) if self._model_dir is None: self._net_data = np.load(config['caffe_weights']).item() self._mean = np.load(config['mean_file']) self._model_filename = None else: self._net_data = None self._mean = np.load(os.path.join(self._model_dir, 'mean.npy')) self._model_filename = os.path.join(self._model_dir, 'model.ckpt')
def _parse_config(self, config)
Parses a tensorflow configuration
2.282373
2.284596
0.999027
if self._model_filename is None: raise ValueError('Model filename not specified') # read the input image self._graph = tf.Graph() with self._graph.as_default(): # read in filenames reader = tf.train.NewCheckpointReader(self._model_filename) # load AlexNet weights weights = AlexNetWeights() weights.conv1W = tf.Variable(reader.get_tensor("Variable")) weights.conv1b = tf.Variable(reader.get_tensor("Variable_1")) weights.conv2W = tf.Variable(reader.get_tensor("Variable_2")) weights.conv2b = tf.Variable(reader.get_tensor("Variable_3")) weights.conv3W = tf.Variable(reader.get_tensor("Variable_4")) weights.conv3b = tf.Variable(reader.get_tensor("Variable_5")) weights.conv4W = tf.Variable(reader.get_tensor("Variable_6")) weights.conv4b = tf.Variable(reader.get_tensor("Variable_7")) weights.conv5W = tf.Variable(reader.get_tensor("Variable_8")) weights.conv5b = tf.Variable(reader.get_tensor("Variable_9")) weights.fc6W = tf.Variable(reader.get_tensor("Variable_10")) weights.fc6b = tf.Variable(reader.get_tensor("Variable_11")) weights.fc7W = tf.Variable(reader.get_tensor("Variable_12")) weights.fc7b = tf.Variable(reader.get_tensor("Variable_13")) weights.fc8W = tf.Variable(reader.get_tensor("Variable_14")) weights.fc8b = tf.Variable(reader.get_tensor("Variable_15")) # form network self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels)) self._output_tensor = self.build_alexnet(weights) self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer) self._initialized = True
def _load(self)
Loads the model weights from a TensorFlow checkpoint
1.731111
1.69451
1.0216
self._graph = tf.Graph() with self._graph.as_default(): self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels)) weights = self.build_alexnet_weights() self._output_tensor = self.build_alexnet(weights) self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer) self._initialized = True
def _initialize(self)
Initializes the network from Caffe weights
2.917556
2.768928
1.053677
with self._graph.as_default(): init = tf.initialize_all_variables() self._sess = tf.Session() self._sess.run(init)
def open_session(self)
Open tensorflow session. Exposed for memory management.
3.017741
2.135402
1.413196
with self._graph.as_default(): self._sess.close() self._sess = None
def close_session(self)
Close tensorflow session. Exposed for memory management.
4.509922
2.878702
1.566651
# setup prediction num_images = image_arr.shape[0] output_arr = None # predict by filling in image array in batches close_sess = False if not self._initialized and self._dynamic_load: self._load() with self._graph.as_default(): if self._sess is None: close_sess = True self.open_session() i = 0 while i < num_images: dim = min(self._batch_size, num_images-i) cur_ind = i end_ind = cur_ind + dim self._input_arr[:dim,:,:,:] = image_arr[cur_ind:end_ind,:,:,:] - self._mean if featurize: output = self._sess.run(self._feature_tensor, feed_dict={self._input_node: self._input_arr}) else: output = self._sess.run(self._output_tensor, feed_dict={self._input_node: self._input_arr}) if output_arr is None: output_arr = output else: output_arr = np.r_[output_arr, output] i = end_ind if close_sess: self.close_session() return output_arr[:num_images,...]
def predict(self, image_arr, featurize=False)
Predict a set of images in batches. Parameters ---------- image_arr : NxHxWxC :obj:`numpy.ndarray` input set of images in a num_images x image height x image width x image channels array (must match parameters of network) featurize : bool whether or not to use the featurization layer or classification output layer Returns ------- :obj:`numpy.ndarray` num_images x feature_dim containing the output values for each input image
2.817089
2.857774
0.985763
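A hedged usage sketch for predict() above; `net` stands for the (unnamed here) network wrapper exposing predict(), and the 227x227x3 input size is an assumption borrowed from standard AlexNet.
    import numpy as np
    batch = np.zeros([4, 227, 227, 3], dtype=np.float32)  # must match the configured image dims
    features = net.predict(batch, featurize=True)          # featurization-layer outputs
    logits = net.predict(batch)                            # classification-layer outputs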
# form image array num_images = len(images) if num_images == 0: return None for image in images: if not isinstance(image, Image): new_images = [] for image in images: if len(image.shape) > 2: new_images.append(ColorImage(image, frame='unspecified')) elif image.dtype == np.float32 or image.dtype == np.float64: new_images.append(DepthImage(image, frame='unspecified')) else: raise ValueError('Image type not understood') images = new_images break im_height = images[0].height im_width = images[0].width channels = images[0].channels tensor_channels = 3 image_arr = np.zeros([num_images, im_height, im_width, tensor_channels]) for j, image in enumerate(images): if channels == 3: image_arr[j,:,:,:] = image.raw_data else: image_arr[j,:,:,:] = np.tile(image.raw_data, [1,1,1,3]) # predict fp_start = time.time() final_blobs = self.cnn_.featurize(image_arr) fp_stop = time.time() logging.debug('Featurization took %f sec per image' %((fp_stop - fp_start) / len(images))) return final_blobs.reshape(final_blobs.shape[0], -1)
def _forward_pass(self, images)
Forward pass a list of images through the CNN
2.894176
2.89975
0.998078
return RepositoriesDataFrame(self.__engine.getRepositories(), self.session, self.__implicits)
def repositories(self)
Returns a DataFrame with the data about the repositories found at the specified repositories path in the form of siva files. >>> repos_df = engine.repositories :rtype: RepositoriesDataFrame
34.306808
19.351768
1.7728
if not isinstance(repository_ids, list): raise Exception("repository_ids must be a list") if not isinstance(reference_names, list): raise Exception("reference_names must be a list") if not isinstance(commit_hashes, list): raise Exception("commit_hashes must be a list") return BlobsDataFrame(self.__engine.getBlobs(repository_ids, reference_names, commit_hashes), self.session, self.__implicits)
def blobs(self, repository_ids=[], reference_names=[], commit_hashes=[])
Retrieves the blobs of a list of repositories, reference names and commit hashes. So the result will be a DataFrame of all the blobs in the given commits that are in the given references that belong to the given repositories. >>> blobs_df = engine.blobs(repo_ids, ref_names, hashes) Calling this function with no arguments is the same as: >>> engine.repositories.references.commits.tree_entries.blobs :param repository_ids: list of repository ids to filter by (optional) :type repository_ids: list of strings :param reference_names: list of reference names to filter by (optional) :type reference_names: list of strings :param commit_hashes: list of hashes to filter by (optional) :type commit_hashes: list of strings :rtype: BlobsDataFrame
2.687348
2.47174
1.087229
self.__engine.fromMetadata(db_path, db_name) return self
def from_metadata(self, db_path, db_name='engine_metadata.db')
Registers in the current session the views of the MetadataSource so the data is obtained from the metadata database instead of reading the repositories with the DefaultSource. :param db_path: path to the folder that contains the database. :type db_path: str :param db_name: name of the database file (engine_metadata.db) by default. :type db_name: str :returns: the same instance of the engine :rtype: Engine
11.706381
11.583963
1.010568
try: func = getattr(DataFrame, name) except AttributeError as e: # PySpark version is too old def func(self, *args, **kwargs): raise e return func wraps = getattr(functools, "wraps", lambda _: lambda f: f) # py3.4+ @wraps(func) def _wrapper(self, *args, **kwargs): dataframe = func(self, *args, **kwargs) if self.__class__ != SourcedDataFrame \ and isinstance(self, SourcedDataFrame) \ and isinstance(dataframe, DataFrame): return self.__class__(dataframe._jdf, self._session, self._implicits) return dataframe return _wrapper
def __generate_method(name)
Wraps the DataFrame's original method by name to return the derived class instance.
4.092145
3.697002
1.106882
return ReferencesDataFrame(self._engine_dataframe.getReferences(), self._session, self._implicits)
def references(self)
Returns the joined DataFrame of references and repositories. >>> refs_df = repos_df.references :rtype: ReferencesDataFrame
28.467953
21.882948
1.300919
return ReferencesDataFrame(self._engine_dataframe.getRemoteReferences(), self._session, self._implicits)
def remote_references(self)
Returns a new DataFrame with only the remote references of the current repositories. >>> remote_refs_df = repos_df.remote_references :rtype: ReferencesDataFrame
29.06106
20.249926
1.435119
return ReferencesDataFrame(self._engine_dataframe.getReferences().getHEAD(), self._session, self._implicits)
def master_ref(self)
Filters the current DataFrame references to only contain those rows whose reference is master. >>> master_df = repos_df.master_ref :rtype: ReferencesDataFrame
54.047245
43.114948
1.253562
return ReferencesDataFrame(self._engine_dataframe.getHEAD(), self._session, self._implicits)
def head_ref(self)
Filters the current DataFrame to only contain those rows whose reference is HEAD. >>> heads_df = refs_df.head_ref :rtype: ReferencesDataFrame
43.353916
31.891361
1.359425
return ReferencesDataFrame(self._engine_dataframe.getMaster(), self._session, self._implicits)
def master_ref(self)
Filters the current DataFrame to only contain those rows whose reference is master. >>> master_df = refs_df.master_ref :rtype: ReferencesDataFrame
26.056574
21.038113
1.238541
return ReferencesDataFrame(self.filter(self.name == ref)._jdf, self._session, self._implicits)
def ref(self, ref)
Filters the current DataFrame to only contain those rows whose reference is the given reference name. >>> heads_df = refs_df.ref('refs/heads/HEAD') :param ref: Reference to get :type ref: str :rtype: ReferencesDataFrame
26.059948
19.923971
1.30797
return CommitsDataFrame(self._engine_dataframe.getAllReferenceCommits(), self._session, self._implicits)
def all_reference_commits(self)
Returns the current DataFrame joined with the commits DataFrame, with all of the commits in all references. >>> commits_df = refs_df.all_reference_commits Take into account that getting all the commits will lead to a lot of repeated tree entries and blobs, thus making your query very slow. Most of the time, you just want the HEAD commit of each reference: >>> commits_df = refs_df.commits :rtype: CommitsDataFrame
31.099817
18.161901
1.712366
return CommitsDataFrame(self._engine_dataframe.getCommits(), self._session, self._implicits)
def commits(self)
Returns the current DataFrame joined with the commits DataFrame. It just returns the last commit in a reference (aka the current state). >>> commits_df = refs_df.commits If you want all commits from the references, use the `all_reference_commits` method, but take into account that getting all the commits will lead to a lot of repeated tree entries and blobs, thus making your query very slow. >>> commits_df = refs_df.all_reference_commits :rtype: CommitsDataFrame
27.012545
19.722841
1.369607
return BlobsDataFrame(self._engine_dataframe.getBlobs(), self._session, self._implicits)
def blobs(self)
Returns this DataFrame joined with the blobs DataSource. >>> blobs_df = refs_df.blobs :rtype: BlobsDataFrame
19.546482
15.144263
1.290686
return TreeEntriesDataFrame(self._engine_dataframe.getTreeEntries(), self._session, self._implicits)
def tree_entries(self)
Returns this DataFrame joined with the tree entries DataSource. >>> entries_df = commits_df.tree_entries :rtype: TreeEntriesDataFrame
23.991058
17.944063
1.336991
return BlobsWithLanguageDataFrame(self._engine_dataframe.classifyLanguages(), self._session, self._implicits)
def classify_languages(self)
Returns a new DataFrame with the language data of any blob added to its row. >>> blobs_lang_df = blobs_df.classify_languages :rtype: BlobsWithLanguageDataFrame
44.505054
17.572603
2.532639
return UASTsDataFrame(self._engine_dataframe.extractUASTs(), self._session, self._implicits)
def extract_uasts(self)
Returns a new DataFrame with the parsed UAST data of any blob added to its row. >>> blobs_df.extract_uasts :rtype: UASTsDataFrame
21.292215
13.269065
1.604651
return UASTsDataFrame(self._engine_dataframe.queryUAST(query, query_col, output_col), self._session, self._implicits)
def query_uast(self, query, query_col='uast', output_col='result')
Queries the UAST of a file with the given query to get specific nodes. >>> rows = uasts_df.query_uast('//*[@roleIdentifier]').collect() >>> rows = uasts_df.query_uast('//*[@roleIdentifier]', 'foo', 'bar') :param query: xpath query :type query: str :param query_col: column containing the list of nodes to query :type query_col: str :param output_col: column to place the result of the query :type output_col: str :rtype: UASTsDataFrame
11.879222
11.353455
1.046309
return UASTsDataFrame(self._engine_dataframe.extractTokens(input_col, output_col), self._session, self._implicits)
def extract_tokens(self, input_col='result', output_col='tokens')
Extracts the tokens from UAST nodes. >>> rows = uasts_df.query_uast('//*[@roleIdentifier]').extract_tokens().collect() >>> rows = uasts_df.query_uast('//*[@roleIdentifier]', output_col='foo').extract_tokens('foo', 'bar') :param input_col: column containing the list of nodes to extract tokens from :type input_col: str :param output_col: column to place the resultant tokens :type output_col: str :rtype: UASTsDataFrame
12.356395
11.187142
1.104518
kwargs = dict() if prefix is not None: kwargs['prefix'] = prefix if delimiter is not None: kwargs['delimiter'] = delimiter bucket_obj = self._ensure_bucket_loaded(bucket) for blob_obj in bucket_obj.list_blobs(**kwargs): yield blob_obj.name
def list( self, bucket: str, prefix: str=None, delimiter: str=None, ) -> typing.Iterator[str]
Returns an iterator of all blob entries in a bucket that match a given prefix. Do not return any keys that contain the delimiter past the prefix.
2.567988
2.656994
0.966502
bucket_obj = self._ensure_bucket_loaded(bucket) try: bucket_obj.delete_blob(key) except NotFound: return False
def delete(self, bucket: str, key: str)
Deletes an object in a bucket. If the operation definitely did not delete anything, return False. Any other return value is treated as something was possibly deleted.
5.54233
4.105061
1.350121
bucket_obj = self._ensure_bucket_loaded(bucket) blob_obj = bucket_obj.blob(key) try: return blob_obj.download_as_string() except NotFound: raise BlobNotFoundError(f"Could not find gs://{bucket}/{key}")
def get(self, bucket: str, key: str) -> bytes
Retrieves the data for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the data
3.840881
4.881526
0.78682
blob_obj = self._get_blob_obj(bucket, key) return self.compute_cloud_checksum(blob_obj)
def get_cloud_checksum( self, bucket: str, key: str ) -> str
Retrieves the cloud-provided checksum for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which checksum is being retrieved. :return: the cloud-provided checksum
5.298443
5.899637
0.898096
blob_obj = self._get_blob_obj(bucket, key) return blob_obj.content_type
def get_content_type( self, bucket: str, key: str ) -> str
Retrieves the content-type for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which content-type is being retrieved. :return: the content-type
5.125426
5.536971
0.925673
blob_obj = self._get_blob_obj(bucket, key) assert binascii.hexlify(base64.b64decode(blob_obj.crc32c)).decode("utf-8").lower() == cloud_checksum return blob_obj.generation
def get_copy_token( self, bucket: str, key: str, cloud_checksum: str, ) -> typing.Any
Given a bucket, key, and the expected cloud-provided checksum, retrieve a token that can be passed into :func:`~cloud_blobstore.BlobStore.copy` that guarantees the copy refers to the same version of the blob identified by the checksum. :param bucket: the bucket the object resides in. :param key: the key of the object for which checksum is being retrieved. :param cloud_checksum: the expected cloud-provided checksum. :return: an opaque copy token
3.805233
4.168367
0.912883
blob_obj = self._get_blob_obj(bucket, key) return blob_obj.time_created
def get_creation_date( self, bucket: str, key: str, ) -> datetime.datetime
Retrieves the creation date for a given key in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which the creation date is being retrieved. :return: the creation date
5.351763
6.669984
0.802365
blob_obj = self._get_blob_obj(bucket, key) return blob_obj.updated
def get_last_modified_date( self, bucket: str, key: str, ) -> datetime.datetime
Retrieves last modified date for a given key in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which the last modified date is being retrieved. :return: the last modified date
5.755784
7.592952
0.758043
blob_obj = self._get_blob_obj(bucket, key) return blob_obj.metadata
def get_user_metadata( self, bucket: str, key: str ) -> typing.Dict[str, str]
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values.
5.658335
6.609237
0.856125
blob_obj = self._get_blob_obj(bucket, key) return blob_obj.size
def get_size( self, bucket: str, key: str ) -> int
Retrieves the filesize :param bucket: the bucket the object resides in. :param key: the key of the object for which size is being retrieved. :return: integer equal to filesize in bytes
5.300533
5.337526
0.993069
bucket_obj = self.gcp_client.bucket(bucket) # type: Bucket return bucket_obj.exists()
def check_bucket_exists(self, bucket: str) -> bool
Checks if bucket with specified name exists. :param bucket: the bucket to be checked. :return: true if specified bucket exists.
6.442976
6.565693
0.981309
@wraps(API_WRAPPER._get_shard) def get_shard(*arg, **kwargs): return self.get_shards(Shard(shard, *arg, **kwargs)) return get_shard
def _get_shard(self, shard)
Dynamically builds methods to query a shard, with proper arg and kwargs support
8.252037
6.166064
1.338299
try: resp = self._request(shards) if return_status_tuple: return (self._parser(resp, full_response), True) else: return self._parser(resp, full_response) except (ConflictError, CloudflareServerError, InternalServerError) as exc: # The Retry system if return_status_tuple: return (None, False) elif self.api_mother.do_retry: # TODO # request_limit = 0 sleep(self.api_mother.retry_sleep) resp = self.request(shards, full_response, True) while not resp[1]: sleep(self.api_mother.retry_sleep) resp = self.request(shards, full_response, True) return resp[0] else: raise exc
def request(self, shards, full_response, return_status_tuple=False)
Request the API. This method is wrapped by similar functions
3.486284
3.612279
0.96512
resp = self.request(shards=args, full_response=full_response) return resp
def get_shards(self, *args, full_response=False)
Get Shards
7.053243
7.353735
0.959137
command = Shard(c=command) return self.get_shards(*(command, Shard(**kwargs)), full_response=full_response)
def command(self, command, full_response=False, **kwargs)
Method Interface to the command API for Nationstates
null
null
null
if telegram: pass else: telegram = self.api_mother.telegram(client_key, tgid, key) telegram.send_telegram(self.nation_name)
def send_telegram(telegram=None, client_key=None, tgid=None, key=None)
Sends a Telegram. Either provide a telegram directly, or provide the API details and have one created internally
8.658482
7.61153
1.137548
payload = {"checksum":checksum, "a":"verify"} if token: payload.update({"token":token}) return self.get_shards(Shard(**payload), full_response=True)
def verify(self, checksum=None, token=None, full_response=False)
Wraps around the verify API
8.572248
8.133
1.054008
raise NotImplementedError()
def get_listing_from_response(self, resp) -> typing.Iterable[typing.Tuple[str, dict]]
Retrieve blob metadata objects from the blobstore response. Metadata objects are represented as tuples of the form (key, {BlobMetadataField: val, ...})
200.595413
93.492645
2.145574
raise NotImplementedError()
def list( self, bucket: str, prefix: str=None, delimiter: str=None, ) -> typing.Iterator[str]
Returns an iterator of all blob entries in a bucket that match a given prefix. Do not return any keys that contain the delimiter past the prefix.
142.380798
184.845276
0.77027
raise NotImplementedError()
def list_v2( self, bucket: str, prefix: str=None, delimiter: str=None, start_after_key: str=None, token: str=None, k_page_max: int=None, ) -> typing.Iterable[typing.Tuple[str, dict]]
Returns an iterator of all blob entries in a bucket that match a given prefix. Do not return any keys that contain the delimiter past the prefix.
150.186371
180.364731
0.832681
raise NotImplementedError()
def upload_file_handle( self, bucket: str, key: str, src_file_handle: typing.BinaryIO, content_type: str=None, metadata: dict=None)
Saves the contents of a file handle as the contents of an object in a bucket.
314.797119
117.783745
2.67267
raise NotImplementedError()
def get_copy_token( self, bucket: str, key: str, cloud_checksum: str, ) -> typing.Any
Given a bucket, key, and the expected cloud-provided checksum, retrieve a token that can be passed into :func:`~cloud_blobstore.BlobStore.copy` that guarantees the copy refers to the same version of the blob identified by the checksum. :param bucket: the bucket the object resides in. :param key: the key of the object for which checksum is being retrieved. :param cloud_checksum: the expected cloud-provided checksum. :return: an opaque copy token
495.639221
1,361.04187
0.364162
raise NotImplementedError()
def get_user_metadata( self, bucket: str, key: str ) -> typing.Dict[str, str]
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values.
330.115875
310.642792
1.062686
kwargs = dict() if prefix is not None: kwargs['Prefix'] = prefix if delimiter is not None: kwargs['Delimiter'] = delimiter for item in ( boto3.resource("s3").Bucket(bucket). objects. filter(**kwargs)): yield item.key
def list( self, bucket: str, prefix: str=None, delimiter: str=None, ) -> typing.Iterator[str]
Returns an iterator of all blob entries in a bucket that match a given prefix. Do not return any keys that contain the delimiter past the prefix.
3.268027
3.485133
0.937705
try: response = self.s3_client.get_object( Bucket=bucket, Key=key ) return response['Body'].read() except botocore.exceptions.ClientError as ex: if ex.response['Error']['Code'] == "NoSuchKey": raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
def get(self, bucket: str, key: str) -> bytes
Retrieves the data for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the data
2.54049
2.656487
0.956335
try: return self.s3_client.head_object( Bucket=bucket, Key=key ) except botocore.exceptions.ClientError as ex: if str(ex.response['Error']['Code']) == \ str(requests.codes.not_found): raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
def get_all_metadata( self, bucket: str, key: str ) -> dict
Retrieves all the metadata for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the metadata
3.433005
3.556832
0.965186
response = self.get_all_metadata(bucket, key) return response['ContentType']
def get_content_type( self, bucket: str, key: str ) -> str
Retrieves the content-type for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which content-type is being retrieved. :return: the content-type
16.130898
17.571016
0.91804
response = self.get_all_metadata(bucket, key) return self.compute_cloud_checksum(response)
def get_cloud_checksum( self, bucket: str, key: str ) -> str
Retrieves the cloud-provided checksum for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which checksum is being retrieved. :return: the cloud-provided checksum
7.30978
8.449425
0.865122
# An S3 object's creation date is stored in its LastModified field which stores the # most recent value between the two. return self.get_last_modified_date(bucket, key)
def get_creation_date( self, bucket: str, key: str, ) -> datetime
Retrieves the creation date for a given key in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which the creation date is being retrieved. :return: the creation date
12.647687
15.472227
0.817445
response = self.get_all_metadata(bucket, key) return response['LastModified']
def get_last_modified_date( self, bucket: str, key: str, ) -> datetime
Retrieves last modified date for a given key in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which the last modified date is being retrieved. :return: the last modified date
8.211037
9.989196
0.821992
try: response = self.get_all_metadata(bucket, key) metadata = response['Metadata'].copy() response = self.s3_client.get_object_tagging( Bucket=bucket, Key=key, ) for tag in response['TagSet']: key, value = tag['Key'], tag['Value'] metadata[key] = value return metadata except botocore.exceptions.ClientError as ex: if str(ex.response['Error']['Code']) == \ str(requests.codes.not_found): raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
def get_user_metadata( self, bucket: str, key: str ) -> typing.Dict[str, str]
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values.
2.765518
2.813277
0.983024
try: response = self.get_all_metadata(bucket, key) size = response['ContentLength'] return size except botocore.exceptions.ClientError as ex: if str(ex.response['Error']['Code']) == str(requests.codes.not_found): raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
def get_size( self, bucket: str, key: str ) -> int
Retrieves the filesize :param bucket: the bucket the object resides in. :param key: the key of the object for which size is being retrieved. :return: integer equal to filesize in bytes
3.609524
3.542521
1.018914
if part_count < search_start: raise ValueError("") result = list() while True: kwargs = dict(Bucket=bucket, Key=key, UploadId=upload_id) # type: dict if search_start > 1: kwargs['PartNumberMarker'] = search_start - 1 # retrieve all the parts after the one we *think* we need to start from. parts_resp = self.s3_client.list_parts(**kwargs) # build a set of all the parts known to be uploaded, detailed in this request. parts_map = set() # type: typing.Set[int] for part_detail in parts_resp.get('Parts', []): parts_map.add(part_detail['PartNumber']) while True: if search_start not in parts_map: # not found, add it to the list of parts we still need. result.append(search_start) # have we met our requirements? if len(result) == return_count or search_start == part_count: return result search_start += 1 if parts_resp['IsTruncated'] and search_start == parts_resp['NextPartNumberMarker']: # finished examining the results of this batch, move onto the next one break
def find_next_missing_parts( self, bucket: str, key: str, upload_id: str, part_count: int, search_start: int=1, return_count: int=1) -> typing.Sequence[int]
Given a `bucket`, `key`, and `upload_id`, find the next N missing parts of a multipart upload, where N=`return_count`. If `search_start` is provided, start the search at part M, where M=`search_start`. `part_count` is the number of parts expected for the upload. Note that the return value may contain fewer than N parts.
3.492073
3.518921
0.99237
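A minimal sketch of resuming a multipart upload with find_next_missing_parts() above; `s3_store`, the bucket, key, and upload id are placeholders, not values from these entries.
    upload_id = 'placeholder-upload-id'   # from the multipart upload creation, not shown here
    missing = s3_store.find_next_missing_parts('my-bucket', 'big/object', upload_id,
                                               part_count=100, return_count=5)
    for part_number in missing:
        pass  # re-upload each missing part, then search again starting at missing[-1] + 1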
exists = True try: self.s3_client.head_bucket(Bucket=bucket) except botocore.exceptions.ClientError as e: # If a client error is thrown, then check that it was a 404 error. # If it was a 404 error, then the bucket does not exist. error_code = int(e.response['Error']['Code']) if error_code == 404: exists = False return exists
def check_bucket_exists(self, bucket: str) -> bool
Checks if bucket with specified name exists. :param bucket: the bucket to be checked. :return: true if specified bucket exists.
1.503573
1.528369
0.983776
region = self.s3_client.get_bucket_location(Bucket=bucket)["LocationConstraint"] return 'us-east-1' if region is None else region
def get_bucket_region(self, bucket) -> str
Get region associated with a specified bucket name. :param bucket: the bucket to be checked. :return: region, Note that underlying AWS API returns None for default US-East-1, I'm replacing that with us-east-1.
3.163371
2.665272
1.186885
version = '' with open(filename, 'r') as fp: for line in fp: m = re.search('__version__ .* ''(.*)''', line) if m is not None: version = (m.group(1)).strip('\'') break return version
def get_version(filename='scanf.py')
Extract version information from source code
null
null
null
format_pat = "" cast_list = [] i = 0 length = len(format) while i < length: found = None for token, pattern, cast in scanf_translate: found = token.match(format, i) if found: if cast: # cast != None cast_list.append(cast) groups = found.groupdict() or found.groups() if groups: pattern = pattern % groups format_pat += pattern i = found.end() break if not found: char = format[i] # escape special characters if char in "|^$()[]-.+*?{}<>\\": format_pat += "\\" format_pat += char i += 1 if DEBUG: print("DEBUG: %r -> %s" % (format, format_pat)) if collapseWhitespace: format_pat = re.sub(r'\s+', r'\\s+', format_pat) format_re = re.compile(format_pat) return format_re, cast_list
def scanf_compile(format, collapseWhitespace=True)
Translate the format into a regular expression For example: >>> format_re, casts = scanf_compile('%s - %d errors, %d warnings') >>> print format_re.pattern (\S+) \- ([+-]?\d+) errors, ([+-]?\d+) warnings Translated formats are cached for faster reuse
3.307321
3.063959
1.079427
if s is None: s = sys.stdin if hasattr(s, "readline"): s = s.readline() format_re, casts = scanf_compile(format, collapseWhitespace) found = format_re.search(s) if found: groups = found.groups() return tuple([casts[i](groups[i]) for i in range(len(groups))])
def scanf(format, s=None, collapseWhitespace=True)
scanf supports the following formats: %c One character %5c 5 characters %d, %i int value %7d, %7i int value with length 7 %f float value %o octal value %X, %x hex value %s string terminated by whitespace Examples: >>> scanf("%s - %d errors, %d warnings", "/usr/sbin/sendmail - 0 errors, 4 warnings") ('/usr/sbin/sendmail', 0, 4) >>> scanf("%o %x %d", "0123 0x123 123") (83, 291, 123) scanf.scanf returns a tuple of found values or None if the format does not match.
3.149722
3.150228
0.999839
y = [] if text is None: textsource = open(filepath, 'r') else: textsource = text.splitlines() for line in textsource: match = scanf(pattern, line) if match: if len(y) == 0: y = [[s] for s in match] else: for i, ydata in enumerate(y): ydata.append(match[i]) if text is None: textsource.close() return y
def extractdata(pattern, text=None, filepath=None)
Read through an entire file or body of text one line at a time. Parse each line that matches the supplied pattern string and ignore the rest. If *text* is supplied, it will be parsed according to the *pattern* string. If *text* is not supplied, the file at *filepath* will be opened and parsed.
2.694295
2.907651
0.926623
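A small worked example of extractdata() above, using the %d and %f conversions listed in the scanf docstring; the log text is made up.
    text = "step 1 took 0.5 s\nstep 2 took 0.7 s"
    columns = extractdata("step %d took %f s", text=text)
    # columns == [[1, 2], [0.5, 0.7]]  -- one inner list per format field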
return Nation(nation_name, self, password=password, autologin=autologin)
def nation(self, nation_name, password=None, autologin=None)
Set up access to the Nation API with the Nation object :param nation_name: Name of the nation :param password: (Optional) password for this nation :param autologin: (Optional) autologin for this nation :type nation_name: str :type password: str :type autologin: str :returns: Nation Object based off nation_name :rtype: Nation
3.441738
4.91445
0.70033
if isinstance(chamber, int): chamber = str(chamber) return WorldAssembly(chamber, self)
def wa(self, chamber)
Set up access to the World Assembly API with the WorldAssembly object :param chamber: Chamber of the WA :type chamber: str, int :returns: WorldAssembly Object based off chamber :rtype: WorldAssembly
5.83457
4.587895
1.271731
return Telegram(self, client_key, tgid, key)
def telegram(self, client_key=None, tgid=None, key=None)
Create Telegram templates which can be used to send telegrams :param client_key: Client key that NationStates gave you :param tgid: TGID from the API template :param key: Key from the API template
5.697269
8.088024
0.704408
pass if isinstance(diffs, patch.diff): diffs = [diffs] for diff in diffs: if diff.header.old_path == '/dev/null': text = [] else: with open(diff.header.old_path) as f: text = f.read() new_text = apply_diff(diff, text) with open(diff.header.new_path, 'w') as f: f.write(new_text)
def apply_patch(diffs)
Not ready for use yet
2.769106
2.647614
1.045888
parts = jwt.split('.') if len(parts) == 3: token_type = JWS elif len(parts) == 5: token_type = JWE else: raise Error('Malformed JWT') return token_type(*parts)
def deserialize_compact(jwt)
Deserialization of a compact representation of a :class:`~jose.JWS` or :class:`~jose.JWE` :param jwt: The serialized JWT to deserialize. :rtype: :class:`~jose.JWS` or :class:`~jose.JWE`. :raises: :class:`~jose.Error` if the JWT is malformed
3.85372
4.088168
0.942652
# copy so the injected claim doesn't mutate the input claims # this is a temporary hack to allow for graceful deprecation of tokens, # ensuring that the library can still handle decrypting tokens issued # before the implementation of the fix claims = deepcopy(claims) assert _TEMP_VER_KEY not in claims claims[_TEMP_VER_KEY] = _TEMP_VER header = dict((add_header or {}).items() + [ (HEADER_ENC, enc), (HEADER_ALG, alg)]) # promote the temp key to the header assert _TEMP_VER_KEY not in header header[_TEMP_VER_KEY] = claims[_TEMP_VER_KEY] plaintext = json_encode(claims) # compress (if required) if compression is not None: header[HEADER_ZIP] = compression try: (compress, _) = COMPRESSION[compression] except KeyError: raise Error( 'Unsupported compression algorithm: {}'.format(compression)) plaintext = compress(plaintext) # body encryption/hash ((cipher, _), key_size), ((hash_fn, _), hash_mod) = JWA[enc] iv = rng(AES.block_size) encryption_key = rng(hash_mod.digest_size) ciphertext = cipher(plaintext, encryption_key[-hash_mod.digest_size/2:], iv) hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata), encryption_key[:-hash_mod.digest_size/2], hash_mod) # cek encryption (cipher, _), _ = JWA[alg] encryption_key_ciphertext = cipher(encryption_key, jwk) return JWE(*map(b64encode_url, (json_encode(header), encryption_key_ciphertext, iv, ciphertext, auth_tag(hash))))
def encrypt(claims, jwk, adata='', add_header=None, alg='RSA-OAEP', enc='A128CBC-HS256', rng=get_random_bytes, compression=None)
Encrypts the given claims and produces a :class:`~jose.JWE` :param claims: A `dict` representing the claims for this :class:`~jose.JWE`. :param jwk: A `dict` representing the JWK to be used for encryption of the CEK. This parameter is algorithm-specific. :param adata: Arbitrary string data to add to the authentication (i.e. HMAC). The same data must be provided during decryption. :param add_header: Additional items to be added to the header. Additional headers *will* be authenticated. :param alg: The algorithm to use for CEK encryption :param enc: The algorithm to use for claims encryption :param rng: Random number generator. A string of random bytes is expected as output. :param compression: The compression algorithm to use. Currently supports `'DEF'`. :rtype: :class:`~jose.JWE` :raises: :class:`~jose.Error` if there is an error producing the JWE
5.460432
5.493478
0.993984
# We need 5 components for JWE token # 1. Generate header header = dict((add_header or {}).items() + [(HEADER_ENC, enc), (HEADER_ALG, alg)]) protected_header = json_encode(header) # 2. Generate CEK mac_key, enc_key = _generate_encryption_keys(enc, rng) encrypted_key = _encrypt_key(mac_key + enc_key, jwk, alg) # 3. Generate Initialization Vector iv = _generate_iv(enc, rng) # 4. Generate payload plaintext = json_encode(claims) # Compress if needed if HEADER_ZIP in header: try: (compression_func, _) = COMPRESSION[header[HEADER_ZIP]] except KeyError: raise Error( 'Unsupported compression algorithm: {}'.format(header[HEADER_ZIP])) M = compression_func(plaintext) else: M = plaintext # Encrypt payload ((cipher, _), key_len), _ = JWA[enc] ciphertext = cipher(M, enc_key, iv) # 5. Generate authentication tag authentication_tag = _generate_authentication_tag( mac_key, protected_header, ciphertext, iv, enc ) return JWE( *map( b64encode_url, (protected_header, encrypted_key, iv, ciphertext, authentication_tag) ) )
def spec_compliant_encrypt(claims, jwk, add_header=None, alg='RSA-OAEP', enc='A128CBC-HS256', rng=get_random_bytes)
Encrypts the given claims and produces a :class:`~jose.JWE` :param claims: A `dict` representing the claims for this :class:`~jose.JWE`. :param jwk: A `dict` representing the JWK to be used for encryption of the CEK. This parameter is algorithm-specific. :param add_header: Additional items to be added to the header. Additional headers *will* be authenticated. :param alg: The algorithm to use for CEK encryption :param enc: The algorithm to use for claims encryption :param rng: Random number generator. A string of random bytes is expected as output. :rtype: :class:`~jose.JWE` :raises: :class:`~jose.Error` if there is an error producing the JWE
4.072731
4.200432
0.969598
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map( b64decode_url, jwe) header = json_decode(protected_header) alg = header[HEADER_ALG] enc = header[HEADER_ENC] # decrypt cek encryption_key = _decrypt_key(encrypted_key, jwk, alg) # decrypt body ((_, decipher), _), ((hash_fn, _), mod) = JWA[enc] version = header.get(_TEMP_VER_KEY) if version: plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:], iv) hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version), encryption_key[:-mod.digest_size/2], mod=mod) else: plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv) hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version), encryption_key[-mod.digest_size:], mod=mod) if not const_compare(auth_tag(hash), authentication_tag): raise Error('Mismatched authentication tags') if HEADER_ZIP in header: try: (_, decompress) = COMPRESSION[header[HEADER_ZIP]] except KeyError: raise Error('Unsupported compression algorithm: {}'.format( header[HEADER_ZIP])) plaintext = decompress(plaintext) claims = json_decode(plaintext) try: del claims[_TEMP_VER_KEY] except KeyError: # expected when decrypting legacy tokens pass _validate(claims, validate_claims, expiry_seconds) return JWT(header, claims)
def legacy_decrypt(jwe, jwk, adata='', validate_claims=True, expiry_seconds=None)
Decrypts a deserialized :class:`~jose.JWE` :param jwe: An instance of :class:`~jose.JWE` :param jwk: A `dict` representing the JWK required to decrypt the content of the :class:`~jose.JWE`. :param adata: Arbitrary string data used during encryption for additional authentication. :param validate_claims: A `bool` indicating whether or not the `exp`, `iat` and `nbf` claims should be validated. Defaults to `True`. :param expiry_seconds: An `int` containing the JWT expiry in seconds, used when evaluating the `iat` claim. Defaults to `None`, which disables `iat` claim validation. :rtype: :class:`~jose.JWT` :raises: :class:`~jose.Expired` if the JWT has expired :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid :raises: :class:`~jose.Error` if there is an error decrypting the JWE
4.187901
4.363963
0.959656
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map( b64decode_url, jwe ) header = json_decode(protected_header) if not _verify_header(header): raise Error('Header is invalid') alg = header[HEADER_ALG] enc = header[HEADER_ENC] # decrypt cek encryption_key = _decrypt_key(encrypted_key, jwk, alg) mac_key, enc_key = _parse_encryption_keys(encryption_key, enc) # verify authentication tag expected_tag = _generate_authentication_tag( mac_key, json_encode(header), ciphertext, iv, enc ) if not const_compare(expected_tag, authentication_tag): raise Error('Mismatched authentication tags') # decrypt body ((_, decipher), _), _ = JWA[enc] # http://tools.ietf.org/html/rfc7516#section-5.1 step 11 M = decipher(ciphertext, enc_key, iv) if HEADER_ZIP in header: try: (_, decompress) = COMPRESSION[header[HEADER_ZIP]] except KeyError: raise Error('Unsupported compression algorithm: {}'.format( header[HEADER_ZIP])) plaintext = decompress(M) else: plaintext = M claims = json_decode(plaintext) _validate(claims, validate_claims, expiry_seconds) return JWT(header, claims)
def spec_compliant_decrypt(jwe, jwk, validate_claims=True, expiry_seconds=None)
Decrypts a deserialized :class:`~jose.JWE` :param jwe: An instance of :class:`~jose.JWE` :param jwk: A `dict` representing the JWK required to decrypt the content of the :class:`~jose.JWE`. :param validate_claims: A `bool` indicating whether or not the `exp`, `iat` and `nbf` claims should be validated. Defaults to `True`. :param expiry_seconds: An `int` containing the JWT expiry in seconds, used when evaluating the `iat` claim. Defaults to `None`, which disables `iat` claim validation. :rtype: :class:`~jose.JWT` :raises: :class:`~jose.Expired` if the JWT has expired :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid :raises: :class:`~jose.Error` if there is an error decrypting the JWE
3.920912
4.120536
0.951554
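A hedged round-trip sketch for the spec-compliant encrypt/decrypt pair above; the {'k': <RSA key>} JWK layout and the use of PyCrypto key objects are assumptions, since these entries only say the JWK dict is algorithm-specific.
    from Crypto.PublicKey import RSA
    key = RSA.generate(2048)
    jwe = spec_compliant_encrypt({'sub': 'alice'}, {'k': key.publickey()})  # encrypt with the public key (assumed JWK layout)
    token = spec_compliant_decrypt(jwe, {'k': key}, validate_claims=False)  # decrypt with the private key
    print(token.claims)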