Dataset schema (column statistics as reported by the dataset viewer):

    column            type    stats
    ----------------  ------  -------------------
    _id               string  lengths 2 to 7
    title             string  lengths 1 to 88
    partition         string  3 classes
    text              string  lengths 75 to 19.8k
    language          string  1 class
    meta_information  dict
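For illustration, one row of this dataset rendered as a Python dict. The values are taken from the first record below; the "text" value is abridged here:

row = {
    "_id": "q12300",
    "title": "EnsensoSensor._depth_im_from_pointcloud",
    "partition": "train",
    "text": "def _depth_im_from_pointcloud(self, msg): ...",
    "language": "python",
    "meta_information": {"resource": ""},
}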

q12300 | EnsensoSensor._depth_im_from_pointcloud | train | python

def _depth_im_from_pointcloud(self, msg):
    """Convert a pointcloud2 message to a depth image."""
    # set format
    if self._format is None:
        self._set_format(msg)

    # rescale camera intr in case binning is turned on
    if msg.height != self._camera_intr.height:
        rescale_factor = float(msg.height) / self._camera_intr.height
        self._camera_intr = self._camera_intr.resize(rescale_factor)

    # read num points
    num_points = msg.height * msg.width

    # read buffer
    raw_tup = struct.Struct(self._format).unpack_from(msg.data, 0)
    raw_arr = np.array(raw_tup)

    # subsample depth values and reshape
    depth_ind = 2 + 4 * np.arange(num_points)
    depth_buf = raw_arr[depth_ind]
    depth_arr = depth_buf.reshape(msg.height, msg.width)
    depth_im = DepthImage(depth_arr, frame=self._frame)
    return depth_im

q12301 | EnsensoSensor.frames | train | python

def frames(self):
    """Retrieve a new frame from the Ensenso and convert it to a ColorImage,
    a DepthImage, and an IrImage.

    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
        The ColorImage, DepthImage, and IrImage of the current frame.

    Raises
    ------
    RuntimeError
        If the Ensenso stream is not running.
    """
    # wait for a new image
    while self._cur_depth_im is None:
        time.sleep(0.01)

    # read next image
    depth_im = self._cur_depth_im
    color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                    3]).astype(np.uint8),
                          frame=self._frame)
    self._cur_depth_im = None
    return color_im, depth_im, None

q12302 | IterativeRegistrationSolver.register | train | python

def register(self, source_point_cloud, target_point_cloud,
             source_normal_cloud, target_normal_cloud, matcher,
             num_iterations=1, compute_total_cost=True,
             match_centroids=False, vis=False):
    """Iteratively register objects to one another.

    Parameters
    ----------
    source_point_cloud : :obj:`autolab_core.PointCloud`
        source object points
    target_point_cloud : :obj:`autolab_core.PointCloud`
        target object points
    source_normal_cloud : :obj:`autolab_core.NormalCloud`
        source object outward-pointing normals
    target_normal_cloud : :obj:`autolab_core.NormalCloud`
        target object outward-pointing normals
    matcher : :obj:`PointToPlaneFeatureMatcher`
        object to match the point sets
    num_iterations : int
        the number of iterations to run
    compute_total_cost : bool
        whether or not to compute the total cost upon termination
    match_centroids : bool
        whether or not to match the centroids of the point clouds

    Returns
    -------
    :obj:`RegistrationResult`
        results containing source to target transformation and cost
    """
    pass

q12303 | TensorDatasetVirtualSensor.frames | train | python

def frames(self):
    """Retrieve the next frame from the tensor dataset and convert it to a
    ColorImage, a DepthImage, and an IrImage.

    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
        The ColorImage, DepthImage, and IrImage of the current frame.

    Raises
    ------
    RuntimeError
        If the stream is not running or if all images in the directory have
        been used.
    """
    if not self._running:
        raise RuntimeError('Device pointing to %s not running. Cannot read frames' % (self._path_to_images))
    if self._im_index >= self._num_images:
        raise RuntimeError('Device is out of images')

    # read images
    datapoint = self._dataset.datapoint(self._im_index,
                                        TensorDatasetVirtualSensor.IMAGE_FIELDS)
    color_im = ColorImage(datapoint[TensorDatasetVirtualSensor.COLOR_IM_FIELD],
                          frame=self._frame)
    depth_im = DepthImage(datapoint[TensorDatasetVirtualSensor.DEPTH_IM_FIELD],
                          frame=self._frame)
    if self._image_rescale_factor != 1.0:
        color_im = color_im.resize(self._image_rescale_factor)
        depth_im = depth_im.resize(self._image_rescale_factor, interp='nearest')
    self._im_index = (self._im_index + 1) % self._num_images
    return color_im, depth_im, None

q12304 | PrimesenseSensor._read_depth_image | train | python

def _read_depth_image(self):
    """Reads a depth image from the device."""
    # read raw uint16 buffer
    im_arr = self._depth_stream.read_frame()
    raw_buf = im_arr.get_buffer_as_uint16()
    buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.DEPTH_IM_WIDTH *
                                                    PrimesenseSensor.DEPTH_IM_HEIGHT)])

    # convert to image in meters
    depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,
                                    PrimesenseSensor.DEPTH_IM_WIDTH)
    depth_image = depth_image * MM_TO_METERS  # convert to meters
    if self._flip_images:
        depth_image = np.flipud(depth_image)
    else:
        depth_image = np.fliplr(depth_image)
    return DepthImage(depth_image, frame=self._frame)

q12305 | PrimesenseSensor._read_color_image | train | python

def _read_color_image(self):
    """Reads a color image from the device."""
    # read raw buffer
    im_arr = self._color_stream.read_frame()
    raw_buf = im_arr.get_buffer_as_triplet()
    r_array = np.array([raw_buf[i][0] for i in range(PrimesenseSensor.COLOR_IM_WIDTH *
                                                     PrimesenseSensor.COLOR_IM_HEIGHT)])
    g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.COLOR_IM_WIDTH *
                                                     PrimesenseSensor.COLOR_IM_HEIGHT)])
    b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.COLOR_IM_WIDTH *
                                                     PrimesenseSensor.COLOR_IM_HEIGHT)])

    # convert to uint8 image
    color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT,
                            PrimesenseSensor.COLOR_IM_WIDTH, 3])
    color_image[:, :, 0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
                                           PrimesenseSensor.COLOR_IM_WIDTH)
    color_image[:, :, 1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
                                           PrimesenseSensor.COLOR_IM_WIDTH)
    color_image[:, :, 2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
                                           PrimesenseSensor.COLOR_IM_WIDTH)
    if self._flip_images:
        color_image = np.flipud(color_image.astype(np.uint8))
    else:
        color_image = np.fliplr(color_image.astype(np.uint8))
    return ColorImage(color_image, frame=self._frame)

q12306 | PrimesenseSensor_ROS._read_depth_images | train | python

def _read_depth_images(self, num_images):
    """Reads depth images from the device."""
    depth_images = self._ros_read_images(self._depth_image_buffer, num_images,
                                         self.staleness_limit)
    for i in range(0, num_images):
        depth_images[i] = depth_images[i] * MM_TO_METERS  # convert to meters
        if self._flip_images:
            depth_images[i] = np.flipud(depth_images[i])
            depth_images[i] = np.fliplr(depth_images[i])
        depth_images[i] = DepthImage(depth_images[i], frame=self._frame)
    return depth_images

q12307 | PrimesenseSensor_ROS._read_color_images | train | python

def _read_color_images(self, num_images):
    """Reads color images from the device."""
    color_images = self._ros_read_images(self._color_image_buffer, num_images,
                                         self.staleness_limit)
    for i in range(0, num_images):
        if self._flip_images:
            color_images[i] = np.flipud(color_images[i].astype(np.uint8))
            color_images[i] = np.fliplr(color_images[i].astype(np.uint8))
        color_images[i] = ColorImage(color_images[i], frame=self._frame)
    return color_images

q12308 | ObjectRender.T_obj_camera | train | python

def T_obj_camera(self):
    """Returns the transformation from camera to object when the object is
    in the given stable pose.

    Returns
    -------
    :obj:`autolab_core.RigidTransform`
        The desired transform.
    """
    if self.stable_pose is None:
        T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
    else:
        T_obj_world = self.stable_pose.T_obj_table.as_frames('obj', 'world')
    T_camera_obj = T_obj_world.inverse() * self.T_camera_world
    return T_camera_obj

q12309 | QueryImageBundle.image | train | python

def image(self, render_mode):
    """Return an image generated with a particular render mode.

    Parameters
    ----------
    render_mode : :obj:`RenderMode`
        The type of image we want.

    Returns
    -------
    :obj:`Image`
        The color, depth, or binary image if render_mode is COLOR, DEPTH, or
        SEGMASK respectively.
    """
    if render_mode == RenderMode.COLOR:
        return self.color_im
    elif render_mode == RenderMode.DEPTH:
        return self.depth_im
    elif render_mode == RenderMode.SEGMASK:
        return self.binary_im
    else:
        return None

q12310 | BagOfFeatures.add | train | python

def add(self, feature):
    """Add a new feature to the bag.

    Parameters
    ----------
    feature : :obj:`Feature`
        feature to add
    """
    self.features_.append(feature)
    self.num_features_ = len(self.features_)

q12311 | BagOfFeatures.extend | train | python

def extend(self, features):
    """Add a list of features to the bag.

    Parameters
    ----------
    features : :obj:`list` of :obj:`Feature`
        features to add
    """
    self.features_.extend(features)
    self.num_features_ = len(self.features_)

q12312 | BagOfFeatures.feature | train | python

def feature(self, index):
    """Returns a feature.

    Parameters
    ----------
    index : int
        index of feature in list

    Returns
    -------
    :obj:`Feature`
    """
    if index < 0 or index >= self.num_features_:
        raise ValueError('Index %d out of range' % (index))
    return self.features_[index]

q12313 | WeightSensor.total_weight | train | python

def total_weight(self):
    """Read a weight from the sensor in grams.

    Returns
    -------
    weight : float
        The sensor weight in grams.
    """
    weights = self._raw_weights()
    if weights.shape[1] == 0:
        return 0.0
    elif weights.shape[1] < self._ntaps:
        return np.sum(np.mean(weights, axis=1))
    else:
        return self._filter_coeffs.dot(np.sum(weights, axis=0))

q12314 | WeightSensor.individual_weights | train | python

def individual_weights(self):
    """Read individual weights from the load cells in grams.

    Returns
    -------
    weights : :obj:`numpy.ndarray`
        The per-load-cell weights in grams.
    """
    weights = self._raw_weights()
    if weights.shape[1] == 0:
        return np.zeros(weights.shape[0])
    elif weights.shape[1] < self._ntaps:
        return np.mean(weights, axis=1)
    else:
        return weights.dot(self._filter_coeffs)

q12315 | WeightSensor._raw_weights | train | python

def _raw_weights(self):
    """Create a numpy array containing the raw sensor weights."""
    if self._debug:
        return np.array([[], [], [], []])
    if not self._running:
        raise ValueError('Weight sensor is not running!')
    if len(self._weight_buffers) == 0:
        time.sleep(0.3)
        if len(self._weight_buffers) == 0:
            raise ValueError('Weight sensor is not retrieving data!')
    weights = np.array(self._weight_buffers)
    return weights

q12316 | WeightSensor._weights_callback | train | python

def _weights_callback(self, msg):
    """Callback for recording weights from sensor."""
    # Read weights
    weights = np.array(msg.data)

    # If needed, initialize individual weight buffers
    if len(self._weight_buffers) == 0:
        self._weight_buffers = [[] for i in range(len(weights))]

    # Record individual weights
    for i, w in enumerate(weights):
        if len(self._weight_buffers[i]) == self._ntaps:
            self._weight_buffers[i].pop(0)
        self._weight_buffers[i].append(w)

q12317 | CameraIntrinsics.crop | train | python

def crop(self, height, width, crop_ci, crop_cj):
    """Convert to new camera intrinsics for a crop of the image from the
    original camera.

    Parameters
    ----------
    height : int
        height of crop window
    width : int
        width of crop window
    crop_ci : int
        row of crop window center
    crop_cj : int
        col of crop window center

    Returns
    -------
    :obj:`CameraIntrinsics`
        camera intrinsics for cropped window
    """
    cx = self.cx + float(width - 1) / 2 - crop_cj
    cy = self.cy + float(height - 1) / 2 - crop_ci
    cropped_intrinsics = CameraIntrinsics(frame=self.frame,
                                          fx=self.fx, fy=self.fy,
                                          skew=self.skew,
                                          cx=cx, cy=cy,
                                          height=height, width=width)
    return cropped_intrinsics

q12318 | CameraIntrinsics.project | train | python

def project(self, point_cloud, round_px=True):
    """Projects a point cloud onto the camera image plane.

    Parameters
    ----------
    point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point`
        A PointCloud or Point to project onto the camera image plane.
    round_px : bool
        If True, projections are rounded to the nearest pixel.

    Returns
    -------
    :obj:`autolab_core.ImageCoords` or :obj:`autolab_core.Point`
        A corresponding set of image coordinates representing the given
        PointCloud's projections onto the camera image plane. If the input
        was a single Point, returns a 2D Point in the camera plane.

    Raises
    ------
    ValueError
        If the input is not a PointCloud or Point in the same reference
        frame as the camera.
    """
    if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):
        raise ValueError('Must provide PointCloud or 3D Point object for projection')
    if point_cloud.frame != self._frame:
        raise ValueError('Cannot project points in frame %s into camera with frame %s' % (point_cloud.frame, self._frame))

    points_proj = self._K.dot(point_cloud.data)
    if len(points_proj.shape) == 1:
        points_proj = points_proj[:, np.newaxis]
    point_depths = np.tile(points_proj[2, :], [3, 1])
    points_proj = np.divide(points_proj, point_depths)
    if round_px:
        points_proj = np.round(points_proj)

    if isinstance(point_cloud, Point):
        return Point(data=points_proj[:2, :].astype(np.int16), frame=self._frame)
    return ImageCoords(data=points_proj[:2, :].astype(np.int16), frame=self._frame)

q12319 | CameraIntrinsics.deproject_to_image | train | python

def deproject_to_image(self, depth_image):
    """Deprojects a DepthImage into a PointCloudImage.

    Parameters
    ----------
    depth_image : :obj:`DepthImage`
        The 2D depth image to project into a point cloud.

    Returns
    -------
    :obj:`PointCloudImage`
        A point cloud image created from the depth image.

    Raises
    ------
    ValueError
        If depth_image is not a valid DepthImage in the same reference frame
        as the camera.
    """
    point_cloud = self.deproject(depth_image)
    point_cloud_im_data = point_cloud.data.T.reshape(depth_image.height,
                                                     depth_image.width, 3)
    return PointCloudImage(data=point_cloud_im_data, frame=self._frame)

q12320 | load_images | train | python

def load_images(cfg):
    """Helper function for loading a set of color images, depth images, and
    IR camera intrinsics.

    The config dictionary must have these keys:
    - prestored_data -- If 1, use the virtual sensor, else use a real sensor.
    - prestored_data_dir -- A path to the prestored data dir for a virtual sensor.
    - sensor/frame -- The frame of reference for the sensor.
    - sensor/device_num -- The device number for the real Kinect.
    - sensor/pipeline_mode -- The mode for the real Kinect's packet pipeline.
    - num_images -- The number of images to generate.

    Parameters
    ----------
    cfg : :obj:`dict`
        A config dictionary.

    Returns
    -------
    :obj:`tuple` of :obj:`list` of :obj:`ColorImage`, :obj:`list` of :obj:`DepthImage`, :obj:`CameraIntrinsics`
        A set of ColorImages and DepthImages, and the Kinect's
        CameraIntrinsics for its IR sensor.
    """
    if 'prestored_data' in cfg.keys() and cfg['prestored_data'] == 1:
        sensor = VirtualKinect2Sensor(path_to_images=cfg['prestored_data_dir'],
                                      frame=cfg['sensor']['frame'])
    else:
        sensor = Kinect2Sensor(device_num=cfg['sensor']['device_num'],
                               frame=cfg['sensor']['frame'],
                               packet_pipeline_mode=cfg['sensor']['pipeline_mode'])
    sensor.start()
    ir_intrinsics = sensor.ir_intrinsics

    # get raw images
    colors = []
    depths = []
    for _ in range(cfg['num_images']):
        color, depth, _ = sensor.frames()
        colors.append(color)
        depths.append(depth)

    sensor.stop()
    return colors, depths, ir_intrinsics

q12321 | Kinect2Sensor.start | train | python

def start(self):
    """Starts the Kinect v2 sensor stream.

    Raises
    ------
    IOError
        If the Kinect v2 is not detected.
    """
    # open packet pipeline
    if self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL:
        self._pipeline = lf2.OpenGLPacketPipeline()
    elif self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU:
        self._pipeline = lf2.CpuPacketPipeline()

    # setup logger
    self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)
    lf2.setGlobalLogger(self._logger)

    # check devices
    self._fn_handle = lf2.Freenect2()
    self._num_devices = self._fn_handle.enumerateDevices()
    if self._num_devices == 0:
        raise IOError('Failed to start stream. No Kinect2 devices available!')
    if self._num_devices <= self._device_num:
        raise IOError('Failed to start stream. Device num %d unavailable!' % (self._device_num))

    # open device
    self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)
    self._device = self._fn_handle.openDevice(self._serial,
                                              pipeline=self._pipeline)

    # add device sync modes
    self._listener = lf2.SyncMultiFrameListener(
        lf2.FrameType.Color | lf2.FrameType.Ir | lf2.FrameType.Depth)
    self._device.setColorFrameListener(self._listener)
    self._device.setIrAndDepthFrameListener(self._listener)

    # start device
    self._device.start()

    # open registration
    self._registration = None
    if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
        logging.debug('Using color to depth registration')
        self._registration = lf2.Registration(
            self._device.getIrCameraParams(),
            self._device.getColorCameraParams())
    self._running = True

q12322 | Kinect2Sensor.stop | train | python

def stop(self):
    """Stops the Kinect2 sensor stream.

    Returns
    -------
    bool
        True if the stream was stopped, False if the device was already
        stopped or was not otherwise available.
    """
    # check that everything is running
    if not self._running or self._device is None:
        logging.warning('Kinect2 device %d not running. Aborting stop' % (self._device_num))
        return False

    # stop the device
    self._device.stop()
    self._device.close()
    self._device = None
    self._running = False
    return True

q12323 | Kinect2Sensor._frames_and_index_map | train | python

def _frames_and_index_map(self, skip_registration=False):
    """Retrieve a new frame from the Kinect and return a ColorImage,
    DepthImage, IrImage, and a map from depth pixels to color pixel indices.

    Parameters
    ----------
    skip_registration : bool
        If True, the registration step is skipped.

    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
        The ColorImage, DepthImage, and IrImage of the current frame, and an
        ndarray that maps pixels of the depth image to the index of the
        corresponding pixel in the color image.

    Raises
    ------
    RuntimeError
        If the Kinect stream is not running.
    """
    if not self._running:
        raise RuntimeError('Kinect2 device %s not running. Cannot read frames' % (self._device_num))

    # read frames
    frames = self._listener.waitForNewFrame()
    unregistered_color = frames['color']
    distorted_depth = frames['depth']
    ir = frames['ir']

    # apply color to depth registration
    color_frame = self._color_frame
    color = unregistered_color
    depth = distorted_depth
    color_depth_map = np.zeros([depth.height, depth.width]).astype(np.int32).ravel()
    if not skip_registration and self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
        color_frame = self._ir_frame
        depth = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Depth)
        color = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Color)
        self._registration.apply(unregistered_color, distorted_depth, depth,
                                 color, color_depth_map=color_depth_map)

    # convert to array (copy needed to prevent reference of deleted data)
    color_arr = copy.copy(color.asarray())
    color_arr[:, :, [0, 2]] = color_arr[:, :, [2, 0]]  # convert BGR to RGB
    color_arr[:, :, 0] = np.fliplr(color_arr[:, :, 0])
    color_arr[:, :, 1] = np.fliplr(color_arr[:, :, 1])
    color_arr[:, :, 2] = np.fliplr(color_arr[:, :, 2])
    color_arr[:, :, 3] = np.fliplr(color_arr[:, :, 3])
    depth_arr = np.fliplr(copy.copy(depth.asarray()))
    ir_arr = np.fliplr(copy.copy(ir.asarray()))

    # convert to meters
    if self._depth_mode == Kinect2DepthMode.METERS:
        depth_arr = depth_arr * MM_TO_METERS

    # release and return
    self._listener.release(frames)
    return (ColorImage(color_arr[:, :, :3], color_frame),
            DepthImage(depth_arr, self._ir_frame),
            IrImage(ir_arr.astype(np.uint16), self._ir_frame),
            color_depth_map)

q12324 | KinectSensorBridged._color_image_callback | train | python

def _color_image_callback(self, image_msg):
    """Subscribe to the image topic and keep it up to date."""
    color_arr = self._process_image_msg(image_msg)
    self._cur_color_im = ColorImage(color_arr[:, :, ::-1], self._frame)

q12325 | KinectSensorBridged._depth_image_callback | train | python

def _depth_image_callback(self, image_msg):
    """Subscribe to the depth image topic and keep it up to date."""
    encoding = image_msg.encoding
    try:
        depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)
    except CvBridgeError as e:
        rospy.logerr(e)
        return
    depth = np.array(depth_arr * MM_TO_METERS, np.float32)
    self._cur_depth_im = DepthImage(depth, self._frame)

q12326 | KinectSensorBridged.frames | train | python

def frames(self):
    """Retrieve a new frame from the Kinect and convert it to a ColorImage
    and a DepthImage; the IrImage is always None for this sensor type.

    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
        The ColorImage, DepthImage, and IrImage of the current frame.

    Raises
    ------
    RuntimeError
        If the Kinect stream is not running.
    """
    # wait for a new image
    while self._cur_depth_im is None or self._cur_color_im is None:
        time.sleep(0.01)

    # read next image
    depth_im = self._cur_depth_im
    color_im = self._cur_color_im
    self._cur_color_im = None
    self._cur_depth_im = None

    # TODO: add ir image
    return color_im, depth_im, None

q12327 | Kinect2SensorFactory.sensor | train | python

def sensor(sensor_type, cfg):
    """Creates a Kinect2 sensor of the specified type.

    Parameters
    ----------
    sensor_type : :obj:`str`
        the type of the sensor (real or virtual)
    cfg : :obj:`YamlConfig`
        dictionary of parameters for sensor initialization
    """
    sensor_type = sensor_type.lower()
    if sensor_type == 'real':
        s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
                          device_num=cfg['device_num'],
                          frame=cfg['frame'])
    elif sensor_type == 'virtual':
        s = VirtualKinect2Sensor(cfg['image_dir'], frame=cfg['frame'])
    elif sensor_type == 'bridged':
        s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame'])
    else:
        raise ValueError('Kinect2 sensor type %s not supported' % (sensor_type))
    return s

q12328 | WeightPublisher._connect | train | python

def _connect(self, id_mask):
    """Connects to all of the load cells serially."""
    # Get all devices attached as USB serial
    all_devices = glob.glob('/dev/ttyUSB*')

    # Identify which of the devices are LoadStar Serial Sensors
    sensors = []
    for device in all_devices:
        try:
            ser = serial.Serial(port=device, timeout=0.5, exclusive=True)
            ser.write('ID\r')
            ser.flush()
            time.sleep(0.05)
            resp = ser.read(13)
            ser.close()
            if len(resp) >= 10 and resp[:len(id_mask)] == id_mask:
                sensors.append((device, resp.rstrip('\r\n')))
        except:
            continue
    sensors = sorted(sensors, key=lambda x: x[1])

    # Connect to each of the serial devices
    serials = []
    for device, key in sensors:
        ser = serial.Serial(port=device, timeout=0.5)
        serials.append(ser)
        rospy.loginfo('Connected to load cell {} at {}'.format(key, device))

    return serials

q12329 | WeightPublisher._flush | train | python

def _flush(self):
    """Flushes all of the serial ports."""
    for ser in self._serials:
        ser.flush()
        ser.flushInput()
        ser.flushOutput()
        time.sleep(0.02)

q12330 | WeightPublisher._read_weights | train | python

def _read_weights(self):
    """Reads weights from each of the load cells."""
    weights = []
    grams_per_pound = 453.592

    # Read from each of the sensors
    for ser in self._serials:
        ser.write('W\r')
        ser.flush()
        time.sleep(0.02)

    for ser in self._serials:
        try:
            output_str = ser.readline()
            weight = float(output_str) * grams_per_pound
            weights.append(weight)
        except:
            weights.append(0.0)

    # Log the output
    log_output = ''
    for w in weights:
        log_output += '{:.2f} '.format(w)
    rospy.loginfo(log_output)

    return weights

q12331 | _Camera.run | train | python

def run(self):
    """Continually write images to the filename specified by a command
    queue."""
    if not self.camera.is_running:
        self.camera.start()

    while True:
        if not self.cmd_q.empty():
            cmd = self.cmd_q.get()
            if cmd[0] == 'stop':
                self.out.close()
                self.recording = False
            elif cmd[0] == 'start':
                filename = cmd[1]
                self.out = si.FFmpegWriter(filename)
                self.recording = True
                self.count = 0
        if self.recording:
            if self.count == 0:
                image, _, _ = self.camera.frames()
                if self.data_buf is None:
                    self.data_buf = np.zeros([1, image.height, image.width,
                                              image.channels])
                self.data_buf[0, ...] = image.raw_data
                self.out.writeFrame(self.data_buf)
            self.count += 1
            if self.count == self.rate:
                self.count = 0

q12332 | VideoRecorder.start | train | python

def start(self):
    """Starts the camera recording process."""
    self._started = True
    self._camera = _Camera(self._actual_camera, self._cmd_q, self._res,
                           self._codec, self._fps, self._rate)
    self._camera.start()

q12333 | VideoRecorder.start_recording | train | python

def start_recording(self, output_file):
    """Starts recording to a given output video file.

    Parameters
    ----------
    output_file : :obj:`str`
        filename to write video to
    """
    if not self._started:
        raise Exception("Must start the video recorder first by calling .start()!")
    if self._recording:
        raise Exception("Cannot record a video while one is already recording!")
    self._recording = True
    self._cmd_q.put(('start', output_file))

q12334 | VideoRecorder.stop_recording | train | python

def stop_recording(self):
    """Stops writing video to file."""
    if not self._recording:
        raise Exception("Cannot stop a video recording when it's not recording!")
    self._cmd_q.put(('stop',))
    self._recording = False

q12335 | VideoRecorder.stop | train | python

def stop(self):
    """Stop the camera process."""
    if not self._started:
        raise Exception("Cannot stop a video recorder before starting it!")
    self._started = False
    if self._actual_camera.is_running:
        self._actual_camera.stop()
    if self._camera is not None:
        try:
            self._camera.terminate()
        except:
            pass

q12336 | Image._preprocess_data | train | python

def _preprocess_data(self, data):
    """Converts a data array to the preferred 3D structure.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to process.

    Returns
    -------
    :obj:`numpy.ndarray`
        The data re-formatted (if needed) as a 3D matrix.

    Raises
    ------
    ValueError
        If the data is not 1, 2, or 3D to begin with.
    """
    original_type = data.dtype
    if len(data.shape) == 1:
        data = data[:, np.newaxis, np.newaxis]
    elif len(data.shape) == 2:
        data = data[:, :, np.newaxis]
    elif len(data.shape) == 0 or len(data.shape) > 3:
        raise ValueError(
            'Illegal data array passed to image. Must be 1, 2, or 3 dimensional numpy array')
    return data.astype(original_type)

q12337 | Image.can_convert | train | python

def can_convert(x):
    """Returns True if x can be converted to an image, False otherwise."""
    if len(x.shape) < 2 or len(x.shape) > 3:
        return False
    dtype = x.dtype
    height = x.shape[0]
    width = x.shape[1]
    channels = 1
    if len(x.shape) == 3:
        channels = x.shape[2]
    if channels > 4:
        return False
    return True

q12338 | Image.from_array | train | python

def from_array(x, frame='unspecified'):
    """Converts an array of data to an Image based on the values in the
    array and the data format."""
    if not Image.can_convert(x):
        raise ValueError('Cannot convert array to an Image!')

    dtype = x.dtype
    height = x.shape[0]
    width = x.shape[1]
    channels = 1
    if len(x.shape) == 3:
        channels = x.shape[2]
    if dtype == np.uint8:
        if channels == 1:
            if np.any((x % BINARY_IM_MAX_VAL) > 0):
                return GrayscaleImage(x, frame)
            return BinaryImage(x, frame)
        elif channels == 3:
            return ColorImage(x, frame)
        else:
            raise ValueError(
                'No available image conversion for uint8 array with 2 channels')
    elif dtype == np.uint16:
        if channels != 1:
            raise ValueError(
                'No available image conversion for uint16 array with 2 or 3 channels')
        return GrayscaleImage(x, frame)
    elif dtype == np.float32 or dtype == np.float64:
        if channels == 1:
            return DepthImage(x, frame)
        elif channels == 2:
            return GdImage(x, frame)
        elif channels == 3:
            logging.warning('Converting float array to uint8')
            return ColorImage(x.astype(np.uint8), frame)
        return RgbdImage(x, frame)
    else:
        raise ValueError(
            'Conversion for dtype %s not supported!' % (str(dtype)))

q12339 | Image.align | train | python

def align(self, scale, center, angle, height, width):
    """Create a thumbnail from the original image that is scaled by the
    given factor, centered on the center pixel, oriented along the grasp
    angle, and cropped to the desired height and width.

    Parameters
    ----------
    scale : float
        scale factor to apply
    center : 2D array
        array containing the row and column index of the pixel to center on
    angle : float
        angle to align the image to
    height : int
        height of the final image
    width : int
        width of the final image
    """
    # rescale
    scaled_im = self.resize(scale)

    # transform
    cx = scaled_im.center[1]
    cy = scaled_im.center[0]
    dx = cx - center[0] * scale
    dy = cy - center[1] * scale
    translation = np.array([dy, dx])
    tf_im = scaled_im.transform(translation, angle)

    # crop
    aligned_im = tf_im.crop(height, width)
    return aligned_im

q12340 | Image.gradients | train | python

def gradients(self):
    """Return the gradient as a pair of numpy arrays.

    Returns
    -------
    :obj:`tuple` of :obj:`numpy.ndarray` of float
        The gradients of the image along each dimension.
    """
    g = np.gradient(self.data.astype(np.float32))
    return g

q12341 | Image.linear_to_ij | train | python

def linear_to_ij(self, linear_inds):
    """Converts linear indices to row and column coordinates.

    Parameters
    ----------
    linear_inds : :obj:`numpy.ndarray` of int
        A list of linear coordinates.

    Returns
    -------
    :obj:`numpy.ndarray` of int
        A 2D ndarray whose first entry is the list of row indices and whose
        second entry is the list of column indices.
    """
    # use floor division so the row indices stay integral under Python 3
    return np.c_[linear_inds // self.width, linear_inds % self.width]

q12342 | Image.median_images | train | python

def median_images(images):
    """Create a median Image from a list of Images.

    Parameters
    ----------
    images : :obj:`list` of :obj:`Image`
        A list of Image objects.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type whose data is the median of all of the
        images' data.
    """
    images_data = np.array([image.data for image in images])
    median_image_data = np.median(images_data, axis=0)

    an_image = images[0]
    return type(an_image)(median_image_data.astype(an_image.data.dtype),
                          an_image.frame)

q12343 | Image.min_images | train | python

def min_images(images):
    """Create a min Image from a list of Images.

    Parameters
    ----------
    images : :obj:`list` of :obj:`Image`
        A list of Image objects.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type whose data is the min of all of the
        images' data.
    """
    images_data = np.array([image.data for image in images])
    images_data[images_data == 0] = np.inf
    min_image_data = np.min(images_data, axis=0)
    min_image_data[min_image_data == np.inf] = 0.0

    an_image = images[0]
    return type(an_image)(min_image_data.astype(an_image.data.dtype),
                          an_image.frame)

q12344 | Image.apply | train | python

def apply(self, method, *args, **kwargs):
    """Create a new image by applying a function to this image's data.

    Parameters
    ----------
    method : :obj:`function`
        A function to call on the data. This takes in a ndarray as its first
        argument and optionally takes other arguments. It should return a
        modified data ndarray.
    args : arguments
        Additional args for method.
    kwargs : keyword arguments
        Additional keyword arguments for method.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type with new data generated by calling
        method on the current image's data.
    """
    data = method(self.data, *args, **kwargs)
    return type(self)(data.astype(self.type), self.frame)

q12345 | Image.focus | train | python

def focus(self, height, width, center_i=None, center_j=None):
    """Zero out all of the image outside of a crop box.

    Parameters
    ----------
    height : int
        The height of the desired crop box.
    width : int
        The width of the desired crop box.
    center_i : int
        The center height point of the crop box. If not specified, the
        center of the image is used.
    center_j : int
        The center width point of the crop box. If not specified, the
        center of the image is used.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type and size that is zeroed out except
        within the crop box.
    """
    if center_i is None:
        center_i = self.height / 2
    if center_j is None:
        center_j = self.width / 2

    start_row = int(max(0, center_i - height / 2))
    end_row = int(min(self.height - 1, center_i + height / 2))
    start_col = int(max(0, center_j - width / 2))
    end_col = int(min(self.width - 1, center_j + width / 2))

    focus_data = np.zeros(self._data.shape)
    focus_data[start_row:end_row + 1, start_col:end_col + 1] = \
        self._data[start_row:end_row + 1, start_col:end_col + 1]
    return type(self)(focus_data.astype(self._data.dtype), self._frame)

q12346 | Image.center_nonzero | train | python

def center_nonzero(self):
    """Recenters the image on the mean of the coordinates of nonzero pixels.

    Returns
    -------
    :obj:`tuple` of :obj:`Image`, :obj:`numpy.ndarray`
        A new Image of the same type and size that is re-centered at the
        mean location of the non-zero pixels, and the 2D pixel shift that
        was applied.
    """
    # get the center of the nonzero pixels
    nonzero_px = np.where(self._data != 0.0)
    nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
    mean_px = np.mean(nonzero_px, axis=0)
    center_px = (np.array(self.shape) / 2.0)[:2]
    diff_px = center_px - mean_px

    # transform image, clipping shifted pixels to the image bounds
    nonzero_px_tf = nonzero_px + diff_px
    nonzero_px_tf[:, 0] = np.max(np.c_[np.zeros(nonzero_px_tf[:, 0].shape),
                                       nonzero_px_tf[:, 0]], axis=1)
    nonzero_px_tf[:, 0] = np.min(np.c_[(self.height - 1) * np.ones(nonzero_px_tf[:, 0].shape),
                                       nonzero_px_tf[:, 0]], axis=1)
    nonzero_px_tf[:, 1] = np.max(np.c_[np.zeros(nonzero_px_tf[:, 1].shape),
                                       nonzero_px_tf[:, 1]], axis=1)
    nonzero_px_tf[:, 1] = np.min(np.c_[(self.width - 1) * np.ones(nonzero_px_tf[:, 1].shape),
                                       nonzero_px_tf[:, 1]], axis=1)
    nonzero_px = nonzero_px.astype(np.uint16)
    nonzero_px_tf = nonzero_px_tf.astype(np.uint16)
    shifted_data = np.zeros(self.shape)
    shifted_data[nonzero_px_tf[:, 0], nonzero_px_tf[:, 1], :] = \
        self.data[nonzero_px[:, 0], nonzero_px[:, 1]].reshape(-1, self.channels)

    return type(self)(shifted_data.astype(self.data.dtype),
                      frame=self._frame), diff_px

q12347 | Image.nonzero_pixels | train | python

def nonzero_pixels(self):
    """Return an array of the nonzero pixels.

    Returns
    -------
    :obj:`numpy.ndarray`
        Nx2 array of the nonzero pixels
    """
    nonzero_px = np.where(np.sum(self.raw_data, axis=2) > 0)
    nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
    return nonzero_px

q12348 | Image.zero_pixels | train | python

def zero_pixels(self):
    """Return an array of the zero pixels.

    Returns
    -------
    :obj:`numpy.ndarray`
        Nx2 array of the zero pixels
    """
    zero_px = np.where(np.sum(self.raw_data, axis=2) == 0)
    zero_px = np.c_[zero_px[0], zero_px[1]]
    return zero_px

q12349 | Image.nan_pixels | train | python

def nan_pixels(self):
    """Return an array of the NaN pixels.

    Returns
    -------
    :obj:`numpy.ndarray`
        Nx2 array of the NaN pixels
    """
    nan_px = np.where(np.isnan(np.sum(self.raw_data, axis=2)))
    nan_px = np.c_[nan_px[0], nan_px[1]]
    return nan_px

q12350 | Image.finite_pixels | train | python

def finite_pixels(self):
    """Return an array of the finite pixels.

    Returns
    -------
    :obj:`numpy.ndarray`
        Nx2 array of the finite pixels
    """
    finite_px = np.where(np.isfinite(self.data))
    finite_px = np.c_[finite_px[0], finite_px[1]]
    return finite_px

q12351 | Image.replace_zeros | train | python

def replace_zeros(self, val, zero_thresh=0.0):
    """Replaces all zeros in the image with a specified value.

    Parameters
    ----------
    val : image dtype
        value to replace zeros with
    zero_thresh : float
        threshold at or below which a pixel is considered zero

    Returns
    -------
    :obj:`Image`
        a new image of the same type with zeros replaced
    """
    new_data = self.data.copy()
    new_data[new_data <= zero_thresh] = val
    return type(self)(new_data.astype(self.data.dtype), frame=self._frame)

q12352 | Image.save | train | python

def save(self, filename):
    """Writes the image to a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to save the image to. Must be one of .png, .jpg, .npy, or
        .npz.

    Raises
    ------
    ValueError
        If an unsupported file type is specified.
    """
    filename = str(filename)
    file_root, file_ext = os.path.splitext(filename)
    if file_ext in COLOR_IMAGE_EXTS:
        im_data = self._image_data()
        if im_data.dtype.type == np.uint8:
            pil_image = PImage.fromarray(im_data.squeeze())
            pil_image.save(filename)
        else:
            try:
                import png
            except:
                raise ValueError('PyPNG not installed! Cannot save 16-bit images')
            png.fromarray(im_data, 'L').save(filename)
    elif file_ext == '.npy':
        np.save(filename, self._data)
    elif file_ext == '.npz':
        np.savez_compressed(filename, self._data)
    else:
        raise ValueError('Extension %s not supported' % (file_ext))

q12353 | Image.savefig | train | python

def savefig(self, output_path, title, dpi=400, format='png', cmap=None):
    """Write the image to a file using pyplot.

    Parameters
    ----------
    output_path : :obj:`str`
        The directory in which to place the file.
    title : :obj:`str`
        The title of the file in which to save the image.
    dpi : int
        The resolution in dots per inch.
    format : :obj:`str`
        The file format to save. Available options include .png, .pdf, .ps,
        .eps, and .svg.
    cmap : :obj:`Colormap`, optional
        A Colormap object for pyplot.
    """
    plt.figure()
    plt.imshow(self.data, cmap=cmap)
    plt.title(title)
    plt.axis('off')
    title_underscore = title.replace(' ', '_')
    plt.savefig(os.path.join(output_path,
                             '{0}.{1}'.format(title_underscore, format)),
                dpi=dpi,
                format=format)

q12354 | Image.load_data | train | python

def load_data(filename):
    """Loads a data matrix from a given file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or
        .npz.

    Returns
    -------
    :obj:`numpy.ndarray`
        The data array read from the file.
    """
    file_root, file_ext = os.path.splitext(filename)
    data = None
    if file_ext.lower() in COLOR_IMAGE_EXTS:
        data = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
    elif file_ext == '.npy':
        data = np.load(filename)
    elif file_ext == '.npz':
        data = np.load(filename)['arr_0']
    else:
        raise ValueError('Extension %s not supported' % (file_ext))
    return data

q12355 | ColorImage._check_valid_data | train | python

def _check_valid_data(self, data):
    """Checks that the given data is a uint8 array with one or three
    channels.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to check.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    if data.dtype.type is not np.uint8:
        raise ValueError('Illegal data type. Color images only support uint8 arrays')

    if len(data.shape) != 3 or data.shape[2] != 3:
        raise ValueError('Illegal data type. Color images only support three channels')

q12356 | ColorImage.swap_channels | train | python

def swap_channels(self, channel_swap):
    """Swaps the two channels specified in the tuple.

    Parameters
    ----------
    channel_swap : :obj:`tuple` of int
        the two channels to swap

    Returns
    -------
    :obj:`ColorImage`
        color image with channels swapped
    """
    if len(channel_swap) != 2:
        raise ValueError('Illegal value for channel swap')
    ci = channel_swap[0]
    cj = channel_swap[1]
    if ci < 0 or ci > 2 or cj < 0 or cj > 2:
        raise ValueError('Channels must be between 0 and 2')
    new_data = self.data.copy()
    new_data[:, :, ci] = self.data[:, :, cj]
    new_data[:, :, cj] = self.data[:, :, ci]
    return ColorImage(new_data, frame=self._frame)

q12357 | ColorImage.find_chessboard | train | python

def find_chessboard(self, sx=6, sy=9):
    """Finds the corners of an sx X sy chessboard in the image.

    Parameters
    ----------
    sx : int
        Number of chessboard corners in x-direction.
    sy : int
        Number of chessboard corners in y-direction.

    Returns
    -------
    :obj:`list` of :obj:`numpy.ndarray`
        A list containing the 2D points of the corners of the detected
        chessboard, or None if no chessboard found.
    """
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (6,5,0)
    objp = np.zeros((sx * sy, 3), np.float32)
    objp[:, :2] = np.mgrid[0:sx, 0:sy].T.reshape(-1, 2)

    # arrays to store object points and image points from all the images
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane

    # create images
    img = self.data.astype(np.uint8)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (sx, sy), None)

    # if found, add object points and image points (after refining them)
    if ret:
        objpoints.append(objp)
        cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners)
        if corners is not None:
            return corners.squeeze()
    return None

q12358 | ColorImage.foreground_mask | train | python

def foreground_mask(self, tolerance, ignore_black=True, use_hsv=False,
                    scale=8, bgmodel=None):
    """Creates a binary image mask for the foreground of an image against a
    uniformly colored background. The background is assumed to be the mode
    value of the histogram for each of the color channels.

    Parameters
    ----------
    tolerance : int
        A +/- level from the detected mean background color. Pixels within
        this range will be classified as background pixels and masked out.
    ignore_black : bool
        If True, the zero pixels will be ignored when computing the
        background model.
    use_hsv : bool
        If True, image will be converted to HSV for background model
        generation.
    scale : int
        Size of background histogram bins -- there will be
        BINARY_IM_MAX_VAL/size bins in the color histogram for each channel.
    bgmodel : :obj:`list` of int
        A list containing the red, green, and blue channel modes of the
        background. If this is None, a background model will be generated
        using the other parameters.

    Returns
    -------
    :obj:`BinaryImage`
        A binary image that masks out the background from the current
        ColorImage.
    """
    # get a background model
    if bgmodel is None:
        bgmodel = self.background_model(ignore_black=ignore_black,
                                        use_hsv=use_hsv,
                                        scale=scale)

    # get the bounds
    lower_bound = np.array([bgmodel[i] - tolerance for i in range(self.channels)])
    upper_bound = np.array([bgmodel[i] + tolerance for i in range(self.channels)])
    orig_zero_indices = np.where(np.sum(self._data, axis=2) == 0)

    # threshold
    binary_data = cv2.inRange(self.data, lower_bound, upper_bound)
    binary_data = BINARY_IM_MAX_VAL - binary_data
    binary_data[orig_zero_indices[0], orig_zero_indices[1]] = 0
    binary_im = BinaryImage(binary_data.astype(np.uint8), frame=self.frame)
    return binary_im

q12359 | ColorImage.background_model | train | python

def background_model(self, ignore_black=True, use_hsv=False, scale=8):
    """Creates a background model for the given image. The background color
    is given by the modes of each channel's histogram.

    Parameters
    ----------
    ignore_black : bool
        If True, the zero pixels will be ignored when computing the
        background model.
    use_hsv : bool
        If True, image will be converted to HSV for background model
        generation.
    scale : int
        Size of background histogram bins -- there will be
        BINARY_IM_MAX_VAL/size bins in the color histogram for each channel.

    Returns
    -------
    A list containing the red, green, and blue channel modes of the
    background.
    """
    # hsv color
    data = self.data
    if use_hsv:
        pil_im = PImage.fromarray(self._data)
        pil_im = pil_im.convert('HSV')
        data = np.asarray(pil_im)

    # find the black pixels
    nonblack_pixels = np.where(np.sum(self.data, axis=2) > 0)
    r_data = self.r_data
    g_data = self.g_data
    b_data = self.b_data
    if ignore_black:
        r_data = r_data[nonblack_pixels[0], nonblack_pixels[1]]
        g_data = g_data[nonblack_pixels[0], nonblack_pixels[1]]
        b_data = b_data[nonblack_pixels[0], nonblack_pixels[1]]

    # generate histograms for each channel
    bounds = (0, np.iinfo(np.uint8).max + 1)
    num_bins = bounds[1] // scale  # integer bin count (floor division for Python 3)
    r_hist, _ = np.histogram(r_data, bins=num_bins, range=bounds)
    g_hist, _ = np.histogram(g_data, bins=num_bins, range=bounds)
    b_hist, _ = np.histogram(b_data, bins=num_bins, range=bounds)
    hists = (r_hist, g_hist, b_hist)

    # find the thresholds as the modes of the image
    modes = [0 for i in range(self.channels)]
    for i in range(self.channels):
        modes[i] = scale * np.argmax(hists[i])
    return modes

q12360 | ColorImage.draw_box | train | python

def draw_box(self, box):
    """Draw a white box on the image.

    Parameters
    ----------
    box : :obj:`autolab_core.Box`
        A 2D box to draw in the image.

    Returns
    -------
    :obj:`ColorImage`
        A new image that is the same as the current one, but with the white
        box drawn in.
    """
    box_data = self._data.copy()
    min_i = box.min_pt[1]
    min_j = box.min_pt[0]
    max_i = box.max_pt[1]
    max_j = box.max_pt[0]

    # draw the horizontal (top and bottom) edges
    for j in range(min_j, max_j):
        box_data[min_i, j, :] = BINARY_IM_MAX_VAL * np.ones(self.channels)
        box_data[max_i, j, :] = BINARY_IM_MAX_VAL * np.ones(self.channels)

    # draw the vertical (left and right) edges
    for i in range(min_i, max_i):
        box_data[i, min_j, :] = BINARY_IM_MAX_VAL * np.ones(self.channels)
        box_data[i, max_j, :] = BINARY_IM_MAX_VAL * np.ones(self.channels)

    return ColorImage(box_data, self._frame)

q12361 | ColorImage.nonzero_hsv_data | train | python

def nonzero_hsv_data(self):
    """Computes the hsv values at the nonzero pixels.

    Returns
    -------
    :obj:`numpy.ndarray`
        array of the hsv values for the image
    """
    hsv_data = cv2.cvtColor(self.data, cv2.COLOR_BGR2HSV)
    nonzero_px = self.nonzero_pixels()
    return hsv_data[nonzero_px[:, 0], nonzero_px[:, 1], ...]

q12362 | ColorImage.segment_kmeans | train | python

def segment_kmeans(self, rgb_weight, num_clusters, hue_weight=0.0):
    """Segment a color image using KMeans based on spatial and color
    distances. Black pixels will automatically be assigned to their own
    'background' cluster.

    Parameters
    ----------
    rgb_weight : float
        weighting of RGB distance relative to spatial and hue distance
    num_clusters : int
        number of clusters to use
    hue_weight : float
        weighting of hue from hsv relative to spatial and RGB distance

    Returns
    -------
    :obj:`SegmentationImage`
        image containing the segment labels
    """
    # form features array
    label_offset = 1
    nonzero_px = np.where(self.data != 0.0)
    nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]

    # get hsv data if specified
    color_vals = rgb_weight * self._data[nonzero_px[:, 0], nonzero_px[:, 1], :]
    if hue_weight > 0.0:
        hsv_data = cv2.cvtColor(self.data, cv2.COLOR_BGR2HSV)
        color_vals = np.c_[color_vals,
                           hue_weight * hsv_data[nonzero_px[:, 0],
                                                 nonzero_px[:, 1], :1]]
    features = np.c_[nonzero_px, color_vals.astype(np.float32)]

    # perform KMeans clustering
    kmeans = sc.KMeans(n_clusters=num_clusters)
    labels = kmeans.fit_predict(features)

    # create output label array
    label_im = np.zeros([self.height, self.width]).astype(np.uint8)
    label_im[nonzero_px[:, 0], nonzero_px[:, 1]] = labels + label_offset
    return SegmentationImage(label_im, frame=self.frame)

q12363 | ColorImage.to_grayscale | train | python

def to_grayscale(self):
    """Converts the color image to grayscale using OpenCV.

    Returns
    -------
    :obj:`GrayscaleImage`
        Grayscale image corresponding to original color image.
    """
    gray_data = cv2.cvtColor(self.data, cv2.COLOR_RGB2GRAY)
    return GrayscaleImage(gray_data, frame=self.frame)

q12364 | ColorImage.open | train | python

def open(filename, frame='unspecified'):
    """Creates a ColorImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or
        .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image
        lies.

    Returns
    -------
    :obj:`ColorImage`
        The new color image.
    """
    data = Image.load_data(filename).astype(np.uint8)
    return ColorImage(data, frame)

q12365 | DepthImage._check_valid_data | train | python

def _check_valid_data(self, data):
    """Checks that the given data is a float array with one channel.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to check.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    if data.dtype.type is not np.float32 and \
            data.dtype.type is not np.float64:
        raise ValueError('Illegal data type. Depth images only support float arrays')

    if len(data.shape) == 3 and data.shape[2] != 1:
        raise ValueError('Illegal data type. Depth images only support single channel')

q12366 | DepthImage.threshold | train | python

def threshold(self, front_thresh=0.0, rear_thresh=100.0):
    """Creates a new DepthImage by setting all depths less than front_thresh
    and greater than rear_thresh to 0.

    Parameters
    ----------
    front_thresh : float
        The lower-bound threshold.
    rear_thresh : float
        The upper-bound threshold.

    Returns
    -------
    :obj:`DepthImage`
        A new DepthImage created from the thresholding operation.
    """
    data = np.copy(self._data)
    data[data < front_thresh] = 0.0
    data[data > rear_thresh] = 0.0
    return DepthImage(data, self._frame)

q12367 | DepthImage.threshold_gradients | train | python

def threshold_gradients(self, grad_thresh):
    """Creates a new DepthImage by zeroing out all depths where the
    magnitude of the gradient at that point is greater than grad_thresh.

    Parameters
    ----------
    grad_thresh : float
        A threshold for the gradient magnitude.

    Returns
    -------
    :obj:`DepthImage`
        A new DepthImage created from the thresholding operation.
    """
    data = np.copy(self._data)
    gx, gy = self.gradients()
    gradients = np.zeros([gx.shape[0], gx.shape[1], 2])
    gradients[:, :, 0] = gx
    gradients[:, :, 1] = gy
    gradient_mags = np.linalg.norm(gradients, axis=2)
    ind = np.where(gradient_mags > grad_thresh)
    data[ind[0], ind[1]] = 0.0
    return DepthImage(data, self._frame)

q12368 | DepthImage.threshold_gradients_pctile | train | python

def threshold_gradients_pctile(self, thresh_pctile, min_mag=0.0):
    """Creates a new DepthImage by zeroing out all depths where the
    magnitude of the gradient at that point is greater than some percentile
    of all gradients.

    Parameters
    ----------
    thresh_pctile : float
        percentile to threshold all gradients above
    min_mag : float
        minimum magnitude of the gradient

    Returns
    -------
    :obj:`DepthImage`
        A new DepthImage created from the thresholding operation.
    """
    data = np.copy(self._data)
    gx, gy = self.gradients()
    gradients = np.zeros([gx.shape[0], gx.shape[1], 2])
    gradients[:, :, 0] = gx
    gradients[:, :, 1] = gy
    gradient_mags = np.linalg.norm(gradients, axis=2)
    grad_thresh = np.percentile(gradient_mags, thresh_pctile)
    ind = np.where((gradient_mags > grad_thresh) & (gradient_mags > min_mag))
    data[ind[0], ind[1]] = 0.0
    return DepthImage(data, self._frame)

q12369 | DepthImage.invalid_pixel_mask | train | python

def invalid_pixel_mask(self):
    """Returns a binary mask for the NaN- and zero-valued pixels. Serves as
    a mask for invalid pixels.

    Returns
    -------
    :obj:`BinaryImage`
        Binary image where a pixel value greater than zero indicates an
        invalid pixel.
    """
    # init mask buffer
    mask = np.zeros([self.height, self.width, 1]).astype(np.uint8)

    # update invalid pixels
    zero_pixels = self.zero_pixels()
    nan_pixels = self.nan_pixels()
    mask[zero_pixels[:, 0], zero_pixels[:, 1]] = BINARY_IM_MAX_VAL
    mask[nan_pixels[:, 0], nan_pixels[:, 1]] = BINARY_IM_MAX_VAL
    return BinaryImage(mask, frame=self.frame)

q12370 | DepthImage.pixels_farther_than | train | python

def pixels_farther_than(self, depth_im, filter_equal_depth=False):
    """Returns the pixels that are farther away than those in the
    corresponding depth image.

    Parameters
    ----------
    depth_im : :obj:`DepthImage`
        depth image to query replacement with
    filter_equal_depth : bool
        whether or not to mark depth values that are equal

    Returns
    -------
    :obj:`numpy.ndarray`
        the pixels
    """
    # find pixels with strictly (or weakly) greater depth
    if filter_equal_depth:
        farther_px = np.where((self.data > depth_im.data) &
                              (np.isfinite(depth_im.data)))
    else:
        farther_px = np.where((self.data >= depth_im.data) &
                              (np.isfinite(depth_im.data)))
    farther_px = np.c_[farther_px[0], farther_px[1]]
    return farther_px

q12371 | DepthImage.combine_with | train | python

def combine_with(self, depth_im):
    """Replaces all zeros in the source depth image with the value of a
    different depth image.

    Parameters
    ----------
    depth_im : :obj:`DepthImage`
        depth image to combine with

    Returns
    -------
    :obj:`DepthImage`
        the combined depth image
    """
    new_data = self.data.copy()

    # replace zero pixels
    new_data[new_data == 0] = depth_im.data[new_data == 0]

    # take closest pixel
    new_data[(new_data > depth_im.data) & (depth_im.data > 0)] = \
        depth_im.data[(new_data > depth_im.data) & (depth_im.data > 0)]

    return DepthImage(new_data, frame=self.frame)

q12372 | DepthImage.to_binary | train | python

def to_binary(self, threshold=0.0):
    """Creates a BinaryImage from the depth image. Points where the depth
    is greater than threshold are converted to ones, and all other points
    are zeros.

    Parameters
    ----------
    threshold : float
        The depth threshold.

    Returns
    -------
    :obj:`BinaryImage`
        A BinaryImage where all 1 points had a depth greater than threshold
        in the DepthImage.
    """
    data = BINARY_IM_MAX_VAL * (self._data > threshold)
    return BinaryImage(data.astype(np.uint8), self._frame)

q12373 | DepthImage.to_color | train | python

def to_color(self, normalize=False):
    """Convert to a color image.

    Parameters
    ----------
    normalize : bool
        whether or not to normalize by the maximum depth

    Returns
    -------
    :obj:`ColorImage`
        color image corresponding to the depth image
    """
    im_data = self._image_data(normalize=normalize)
    return ColorImage(im_data, frame=self._frame)

q12374 | DepthImage.to_float | train | python

def to_float(self):
    """Converts to 32-bit data.

    Returns
    -------
    :obj:`DepthImage`
        depth image with 32-bit float data
    """
    return DepthImage(self.data.astype(np.float32), frame=self.frame)

q12375 | DepthImage.point_normal_cloud | train | python

def point_normal_cloud(self, camera_intr):
    """Computes a PointNormalCloud from the depth image.

    Parameters
    ----------
    camera_intr : :obj:`CameraIntrinsics`
        The camera parameters on which this depth image was taken.

    Returns
    -------
    :obj:`autolab_core.PointNormalCloud`
        A PointNormalCloud created from the depth image.
    """
    point_cloud_im = camera_intr.deproject_to_image(self)
    normal_cloud_im = point_cloud_im.normal_cloud_im()
    point_cloud = point_cloud_im.to_point_cloud()
    normal_cloud = normal_cloud_im.to_normal_cloud()
    return PointNormalCloud(point_cloud.data, normal_cloud.data,
                            frame=self._frame)

q12376 | DepthImage.open | train | python

def open(filename, frame='unspecified'):
    """Creates a DepthImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or
        .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image
        lies.

    Returns
    -------
    :obj:`DepthImage`
        The new depth image.
    """
    file_root, file_ext = os.path.splitext(filename)
    data = Image.load_data(filename)
    if file_ext.lower() in COLOR_IMAGE_EXTS:
        data = (data * (MAX_DEPTH / BINARY_IM_MAX_VAL)).astype(np.float32)
    return DepthImage(data, frame)

q12377 | IrImage.open | train | python

def open(filename, frame='unspecified'):
    """Creates an IrImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or
        .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image
        lies.

    Returns
    -------
    :obj:`IrImage`
        The new IR image.
    """
    data = Image.load_data(filename)
    data = (data * (MAX_IR / BINARY_IM_MAX_VAL)).astype(np.uint16)
    return IrImage(data, frame)

q12378 | GrayscaleImage.to_color | train | python

def to_color(self):
    """Convert the grayscale image to a ColorImage.

    Returns
    -------
    :obj:`ColorImage`
        A color image equivalent to the grayscale one.
    """
    color_data = np.repeat(self.data[:, :, np.newaxis], 3, axis=2)
    return ColorImage(color_data, self._frame)

q12379 | GrayscaleImage.open | train | python

def open(filename, frame='unspecified'):
    """Creates a GrayscaleImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or
        .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image
        lies.

    Returns
    -------
    :obj:`GrayscaleImage`
        The new grayscale image.
    """
    data = Image.load_data(filename)
    return GrayscaleImage(data, frame)

q12380 | BinaryImage.pixelwise_or | train | python

def pixelwise_or(self, binary_im):
    """Takes the OR operation with another binary image.

    Parameters
    ----------
    binary_im : :obj:`BinaryImage`
        binary image for the OR operation

    Returns
    -------
    :obj:`BinaryImage`
        OR of this binary image and the other image
    """
    data = np.copy(self._data)
    ind = np.where(binary_im.data > 0)
    data[ind[0], ind[1], ...] = BINARY_IM_MAX_VAL
    return BinaryImage(data, self._frame)

q12381 | BinaryImage.contour_mask | train | python

def contour_mask(self, contour):
    """Generates a binary image with only the given contour filled in."""
    # fill in new data
    new_data = np.zeros(self.data.shape)
    num_boundary = contour.boundary_pixels.shape[0]
    boundary_px_ij_swapped = np.zeros([num_boundary, 1, 2])
    boundary_px_ij_swapped[:, 0, 0] = contour.boundary_pixels[:, 1]
    boundary_px_ij_swapped[:, 0, 1] = contour.boundary_pixels[:, 0]
    cv2.fillPoly(new_data,
                 pts=[boundary_px_ij_swapped.astype(np.int32)],
                 color=(BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL))
    orig_zeros = np.where(self.data == 0)
    new_data[orig_zeros[0], orig_zeros[1]] = 0
    return BinaryImage(new_data.astype(np.uint8), frame=self._frame)
q12382
BinaryImage.boundary_map
train
def boundary_map(self):
        """ Computes the boundary pixels in the image and sets them to
        nonzero values.

        Returns
        -------
        :obj:`BinaryImage`
            binary image with nonzeros on the boundary of the original image
        """
        # compute contours
        contours = self.find_contours()

        # fill in nonzero pixels; indices are cast to int64 so that
        # coordinates above 255 index correctly
        new_data = np.zeros(self.data.shape)
        for contour in contours:
            new_data[contour.boundary_pixels[:, 0].astype(np.int64),
                     contour.boundary_pixels[:, 1].astype(np.int64)] = \
                np.iinfo(np.uint8).max
        return BinaryImage(new_data.astype(np.uint8), frame=self.frame)
python
{ "resource": "" }
q12383
BinaryImage.most_free_pixel
train
def most_free_pixel(self):
        """ Find the black pixel with the largest distance from the white
        pixels.

        Returns
        -------
        :obj:`numpy.ndarray`
            2-vector containing the most free pixel
        """
        dist_tf = self.to_distance_im()
        max_px = np.where(dist_tf == np.max(dist_tf))
        free_pixel = np.array([max_px[0][0], max_px[1][0]])
        return free_pixel
python
{ "resource": "" }
q12384
BinaryImage.diff_with_target
train
def diff_with_target(self, binary_im):
        """ Creates a color image to visualize the overlap between two
        images. Nonzero pixels that match in both images are green.
        Nonzero pixels of this image that aren't in the other image are
        yellow. Nonzero pixels of the other image that aren't in this
        image are red.

        Parameters
        ----------
        binary_im : :obj:`BinaryImage`
            binary image to take the difference with

        Returns
        -------
        :obj:`ColorImage`
            color image to visualize the image difference
        """
        red = np.array([BINARY_IM_MAX_VAL, 0, 0])
        yellow = np.array([BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL, 0])
        green = np.array([0, BINARY_IM_MAX_VAL, 0])
        overlap_data = np.zeros([self.height, self.width, 3])
        unfilled_px = np.where((self.data == 0) & (binary_im.data > 0))
        overlap_data[unfilled_px[0], unfilled_px[1], :] = red
        filled_px = np.where((self.data > 0) & (binary_im.data > 0))
        overlap_data[filled_px[0], filled_px[1], :] = green
        spurious_px = np.where((self.data > 0) & (binary_im.data == 0))
        overlap_data[spurious_px[0], spurious_px[1], :] = yellow
        return ColorImage(overlap_data.astype(np.uint8), frame=self.frame)
python
{ "resource": "" }
q12385
BinaryImage.num_adjacent
train
def num_adjacent(self, i, j):
        """ Counts the number of adjacent nonzero pixels to a given pixel.

        Parameters
        ----------
        i : int
            row index of query pixel
        j : int
            col index of query pixel

        Returns
        -------
        int
            number of adjacent nonzero pixels
        """
        # check values
        if i < 1 or i > self.height - 2 or j < 1 or j > self.width - 2:
            raise ValueError('Pixels out of bounds')

        # count the number of nonzero 4-connected neighbors
        count = 0
        diffs = [[-1, 0], [1, 0], [0, -1], [0, 1]]
        for d in diffs:
            if self.data[i + d[0]][j + d[1]] > self._threshold:
                count += 1
        return count
python
{ "resource": "" }
q12386
BinaryImage.to_sdf
train
def to_sdf(self):
        """ Converts the 2D image to a 2D signed distance field.

        Returns
        -------
        :obj:`numpy.ndarray`
            2D float array of the signed distance field
        """
        # compute medial axis transform inside and outside the mask
        skel, sdf_in = morph.medial_axis(self.data, return_distance=True)
        _, sdf_out = morph.medial_axis(
            np.iinfo(np.uint8).max - self.data, return_distance=True)

        # convert to true sdf (negative inside, positive outside)
        sdf = sdf_out - sdf_in
        return sdf
python
{ "resource": "" }
q12387
BinaryImage.to_color
train
def to_color(self):
        """Creates a ColorImage from the binary image.

        Returns
        -------
        :obj:`ColorImage`
            The newly-created color image.
        """
        color_data = np.zeros([self.height, self.width, 3])
        color_data[:, :, 0] = self.data
        color_data[:, :, 1] = self.data
        color_data[:, :, 2] = self.data
        return ColorImage(color_data.astype(np.uint8), self._frame)
python
{ "resource": "" }
q12388
BinaryImage.open
train
def open(filename, frame='unspecified'):
        """Creates a BinaryImage from a file.

        Parameters
        ----------
        filename : :obj:`str`
            The file to load the data from. Must be one of .png, .jpg,
            .npy, or .npz.

        frame : :obj:`str`
            A string representing the frame of reference in which the new
            image lies.

        Returns
        -------
        :obj:`BinaryImage`
            The new binary image.
        """
        data = Image.load_data(filename)
        if len(data.shape) > 2 and data.shape[2] > 1:
            data = data[:, :, 0]
        return BinaryImage(data, frame)
python
{ "resource": "" }
q12389
RgbdImage._check_valid_data
train
def _check_valid_data(self, data):
        """Checks that the given data is a float array with four channels.

        Parameters
        ----------
        data : :obj:`numpy.ndarray`
            The data to check.

        Raises
        ------
        ValueError
            If the data is invalid.
        """
        if data.dtype.type is not np.float32 and \
                data.dtype.type is not np.float64:
            raise ValueError(
                'Illegal data type. RGB-D images only support float arrays')

        if len(data.shape) != 3 or data.shape[2] != 4:
            raise ValueError(
                'Illegal data type. RGB-D images only support four-channel'
                ' arrays')

        color_data = data[:, :, :3]
        if np.any((color_data < 0) | (color_data > BINARY_IM_MAX_VAL)):
            raise ValueError(
                'Color channels must be in the range [0, BINARY_IM_MAX_VAL]')
python
{ "resource": "" }
q12390
RgbdImage.from_color_and_depth
train
def from_color_and_depth(color_im, depth_im):
        """ Creates an RGB-D image from separate color and depth images. """
        # check shape
        if color_im.height != depth_im.height or \
                color_im.width != depth_im.width:
            raise ValueError(
                'Color and depth images must have the same shape')

        # check frame
        if color_im.frame != depth_im.frame:
            raise ValueError(
                'Color and depth images must have the same frame')

        # form composite data
        rgbd_data = np.zeros([color_im.height, color_im.width, 4])
        rgbd_data[:, :, :3] = color_im.data.astype(np.float64)
        rgbd_data[:, :, 3] = depth_im.data
        return RgbdImage(rgbd_data, frame=color_im.frame)
python
{ "resource": "" }
q12391
RgbdImage.color
train
def color(self):
        """ Returns the color image. """
        return ColorImage(
            self.raw_data[:, :, :3].astype(np.uint8), frame=self.frame)
python
{ "resource": "" }
q12392
RgbdImage.combine_with
train
def combine_with(self, rgbd_im):
        """ Replaces all zero-depth pixels in this rgbd image with the
        values of a different rgbd image, and keeps the closer of the two
        surfaces wherever both depths are nonzero.

        Parameters
        ----------
        rgbd_im : :obj:`RgbdImage`
            rgbd image to combine with

        Returns
        -------
        :obj:`RgbdImage`
            the combined rgbd image
        """
        new_data = self.data.copy()
        depth_data = self.depth.data
        other_depth_data = rgbd_im.depth.data
        depth_zero_px = self.depth.zero_pixels()
        depth_replace_px = np.where(
            (other_depth_data != 0) & (other_depth_data < depth_data))
        depth_replace_px = np.c_[depth_replace_px[0], depth_replace_px[1]]

        # replace zero pixels
        new_data[depth_zero_px[:, 0], depth_zero_px[:, 1], :] = \
            rgbd_im.data[depth_zero_px[:, 0], depth_zero_px[:, 1], :]

        # take closest pixel
        new_data[depth_replace_px[:, 0], depth_replace_px[:, 1], :] = \
            rgbd_im.data[depth_replace_px[:, 0], depth_replace_px[:, 1], :]

        return RgbdImage(new_data, frame=self.frame)
python
{ "resource": "" }
q12393
GdImage.from_grayscale_and_depth
train
def from_grayscale_and_depth(gray_im, depth_im):
        """ Creates a G-D image from separate grayscale and depth images. """
        # check shape
        if gray_im.height != depth_im.height or \
                gray_im.width != depth_im.width:
            raise ValueError(
                'Grayscale and depth images must have the same shape')

        # check frame
        if gray_im.frame != depth_im.frame:
            raise ValueError(
                'Grayscale and depth images must have the same frame')

        # form composite data
        gd_data = np.zeros([gray_im.height, gray_im.width, 2])
        gd_data[:, :, 0] = gray_im.data.astype(np.float64)
        gd_data[:, :, 1] = depth_im.data
        return GdImage(gd_data, frame=gray_im.frame)
python
{ "resource": "" }
q12394
GdImage.gray
train
def gray(self):
        """ Returns the grayscale image. """
        return GrayscaleImage(
            self.raw_data[:, :, 0].astype(np.uint8), frame=self.frame)
python
{ "resource": "" }
q12395
SegmentationImage.border_pixels
train
def border_pixels(
            self, grad_sigma=0.5, grad_lower_thresh=0.1, grad_upper_thresh=1.0):
        """ Returns the pixels on the boundary between all segments,
        excluding the zero segment.

        Parameters
        ----------
        grad_sigma : float
            standard deviation used for the gaussian gradient filter
        grad_lower_thresh : float
            lower threshold on the gradient magnitude used to determine the
            boundary pixels
        grad_upper_thresh : float
            upper threshold on the gradient magnitude used to determine the
            boundary pixels

        Returns
        -------
        :obj:`numpy.ndarray`
            Nx2 array of pixels on the boundary
        """
        # boundary pixels
        boundary_im = np.ones(self.shape)
        for i in range(1, self.num_segments):
            label_border_im = self.data.copy()
            label_border_im[self.data == 0] = i
            grad_mag = sf.gaussian_gradient_magnitude(
                label_border_im.astype(np.float32), sigma=grad_sigma)
            nonborder_px = np.where(
                (grad_mag < grad_lower_thresh) | (grad_mag > grad_upper_thresh))
            boundary_im[nonborder_px[0], nonborder_px[1]] = 0

        # return boundary pixels
        border_px = np.where(boundary_im > 0)
        border_px = np.c_[border_px[0], border_px[1]]
        return border_px
python
{ "resource": "" }
q12396
SegmentationImage.segment_mask
train
def segment_mask(self, segnum):
        """ Returns a binary image of just the segment corresponding to the
        given number.

        Parameters
        ----------
        segnum : int
            the number of the segment to generate a mask for

        Returns
        -------
        :obj:`BinaryImage`
            binary mask of the given segment
        """
        binary_data = np.zeros(self.shape)
        binary_data[self.data == segnum] = BINARY_IM_MAX_VAL
        return BinaryImage(binary_data.astype(np.uint8), frame=self.frame)
python
{ "resource": "" }
q12397
SegmentationImage.open
train
def open(filename, frame='unspecified'):
        """ Opens a segmentation image.

        Parameters
        ----------
        filename : :obj:`str`
            The file to load the data from. Must be one of .png, .jpg,
            .npy, or .npz.

        frame : :obj:`str`
            A string representing the frame of reference in which the new
            image lies.

        Returns
        -------
        :obj:`SegmentationImage`
            The new segmentation image.
        """
        data = Image.load_data(filename)
        return SegmentationImage(data, frame)
python
{ "resource": "" }
q12398
PointCloudImage.to_point_cloud
train
def to_point_cloud(self):
        """Convert the image to a PointCloud object.

        Returns
        -------
        :obj:`autolab_core.PointCloud`
            The corresponding PointCloud.
        """
        return PointCloud(
            data=self._data.reshape(self.height * self.width, 3).T,
            frame=self._frame)
python
{ "resource": "" }
q12399
PointCloudImage.normal_cloud_im
train
def normal_cloud_im(self, ksize=3):
        """Generate a NormalCloudImage from the PointCloudImage using Sobel
        filtering.

        Parameters
        ----------
        ksize : int
            Size of the kernel to use for derivative computation

        Returns
        -------
        :obj:`NormalCloudImage`
            The corresponding NormalCloudImage.
        """
        # compute direction via cross product of derivatives
        gy = cv2.Sobel(self.data, cv2.CV_64F, 1, 0, ksize=ksize)
        gx = cv2.Sobel(self.data, cv2.CV_64F, 0, 1, ksize=ksize)
        gx_data = gx.reshape(self.height * self.width, 3)
        gy_data = gy.reshape(self.height * self.width, 3)
        pc_grads = np.cross(gx_data, gy_data)  # default to point toward camera

        # normalize
        pc_grad_norms = np.linalg.norm(pc_grads, axis=1)
        pc_grads[pc_grad_norms > 0] = pc_grads[pc_grad_norms > 0] / \
            np.tile(pc_grad_norms[pc_grad_norms > 0, np.newaxis], [1, 3])
        # zero norm means pointing toward camera
        pc_grads[pc_grad_norms == 0.0] = np.array([0, 0, -1.0])

        # reshape
        normal_im_data = pc_grads.reshape(self.height, self.width, 3)

        # preserve zeros
        zero_px = self.zero_pixels()
        normal_im_data[zero_px[:, 0], zero_px[:, 1], :] = np.zeros(3)
        return NormalCloudImage(normal_im_data, frame=self.frame)
python
{ "resource": "" }