| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode())
|
def data_url(content, mimetype=None)
|
Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype of the content; if not given, it is guessed from the file name for ``Path`` input and defaults to ``application/octet-stream`` otherwise
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
| 2.141962
| 2.341712
| 0.914699
|
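A brief usage sketch for ``data_url`` (assuming the function above is in scope; the file name is hypothetical, expected results shown in comments):

# raw bytes with an explicit mimetype
data_url(b'hello', mimetype='text/plain')
# -> 'data:text/plain;base64,aGVsbG8='
# a file path; the mimetype is guessed from the file name
import pathlib
data_url(pathlib.Path('logo.png'))
# -> 'data:image/png;base64,...'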
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s)
|
def to_binary(s, encoding='utf8')
|
Portable cast function.
In Python 2 the ``str`` function, which is used to coerce objects to bytes, does not
accept an encoding argument, whereas Python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s.
| 3.41252
| 4.428472
| 0.770586
|
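A short sketch of ``to_binary`` (assuming the function above is in scope):

to_binary('abc')                   # -> b'abc' on Python 3 (str 'abc' on Python 2)
to_binary('äbc', encoding='utf8')  # -> b'\xc3\xa4bc' on Python 3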
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
|
def dict_merged(d, _filter=None, **kw)
|
Update dictionary ``d`` with the items passed as ``kw``, keeping only values that pass ``_filter`` (by default, values that are not ``None``).
| 3.300447
| 3.280133
| 1.006193
|
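A minimal example of ``dict_merged`` with the default filter (assuming the function above is in scope):

dict_merged({'a': 1}, b=None, c=3)
# -> {'a': 1, 'c': 3}  (b is dropped because its value is None)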
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
|
def xmlchars(text)
|
Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``.
| 3.99477
| 4.06838
| 0.981907
|
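A small sketch of ``xmlchars`` (control characters that are illegal in XML are stripped; assuming the function above is in scope):

xmlchars('bad\x00\x0bdata')
# -> 'baddata'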
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub(r'\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
|
def slug(s, remove_whitespace=True, lowercase=True)
|
Condensed version of s, containing only lowercase alphanumeric characters.
| 2.39986
| 2.38567
| 1.005948
|
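A small sketch of ``slug`` (diacritics, punctuation and, optionally, whitespace are removed; assuming the function above is in scope):

slug('Ch. Müller-Lyer!')
# -> 'chmullerlyer'
slug('Ch. Müller-Lyer!', remove_whitespace=False)
# -> 'ch mullerlyer'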
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding)
|
def encoded(string, encoding='utf-8')
|
Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding to which the string is coerced
:return: six.binary_type
| 3.846063
| 4.28606
| 0.897342
|
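A sketch of ``encoded`` showing the latin1 fallback (assuming the function above is in scope):

encoded(u'täst')    # text is simply encoded -> b't\xc3\xa4st'
encoded(b'\xe4bc')  # not valid utf-8, so decoded as latin1 first -> b'\xc3\xa4bc'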
if comment:
strip = True
if isinstance(p, (list, tuple)):
res = [l.decode(encoding) if encoding else l for l in p]
else:
with Path(p).open(encoding=encoding or 'utf-8') as fp:
res = fp.readlines()
if strip:
res = [l.strip() or None for l in res]
if comment:
res = [None if l and l.startswith(comment) else l for l in res]
if normalize:
res = [unicodedata.normalize(normalize, l) if l else l for l in res]
if linenumbers:
return [(n, l) for n, l in enumerate(res, 1)]
return [l for l in res if l is not None]
|
def readlines(p,
encoding=None,
strip=False,
comment=None,
normalize=None,
linenumbers=False)
|
Read a `list` of lines from a text file.
:param p: File path (or `list` or `tuple` of text)
:param encoding: Registered codec.
:param strip: If `True`, strip leading and trailing whitespace.
:param comment: String used as syntax to mark comment lines. When not `None`, \
commented lines will be stripped. This implies `strip=True`.
:param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD'
:param linenumbers: If `True`, also return line numbers.
:return: `list` of text lines or pairs (`int`, text or `None`).
| 2.100386
| 2.180654
| 0.963191
|
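A short example of ``readlines`` on an in-memory list (assuming the function above is in scope); passing ``comment`` implies stripping:

readlines(['a ', '# note', '', 'b'], comment='#')
# -> ['a', 'b']
readlines(['a ', '# note', '', 'b'], comment='#', linenumbers=True)
# -> [(1, 'a'), (2, None), (3, None), (4, 'b')]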
for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
if mode in ('all', 'dirs'):
for dirname in dirnames:
yield Path(dirpath).joinpath(dirname)
if mode in ('all', 'files'):
for fname in filenames:
yield Path(dirpath).joinpath(fname)
|
def walk(p, mode='all', **kw)
|
Wrapper for `os.walk`, yielding `Path` objects.
:param p: root of the directory tree to walk.
:param mode: 'all|dirs|files', defaulting to 'all'.
:param kw: Keyword arguments are passed to `os.walk`.
:return: Generator for the requested Path objects.
| 2.08201
| 2.113132
| 0.985272
|
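A usage sketch for ``walk`` (hypothetical directory name; extra keyword arguments are forwarded to ``os.walk``):

for path in walk('src', mode='files', followlinks=True):
    print(path)  # each item is a pathlib-style Path to a file under src/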
m = max(itertools.chain(map(len, self), [0]))
fields = (" %s = {%s}" % (k.ljust(m), self[k]) for k in self)
return "@%s{%s,\n%s\n}" % (
getattr(self.genre, 'value', self.genre), self.id, ",\n".join(fields))
|
def bibtex(self)
|
Represent the source in BibTeX format.
:return: string encoding the source in BibTeX syntax.
| 4.681001
| 4.969867
| 0.941877
|
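A rough sketch of the output of ``bibtex()`` for a mapping-like source (hypothetical id and fields):

# Given genre 'book', id 'meier2015' and fields author/year, bibtex() yields roughly:
# @book{meier2015,
#  author = {Meier, Hans},
#  year   = {2015}
# }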
genre = getattr(self.genre, 'value', self.genre)
pages_at_end = genre in (
'book',
'phdthesis',
'mastersthesis',
'misc',
'techreport')
thesis = genre in ('phdthesis', 'mastersthesis')
if self.get('editor'):
editors = self['editor']
affix = 'eds' if ' and ' in editors or '&' in editors else 'ed'
editors = " %s (%s.)" % (editors, affix)
else:
editors = None
res = [self.get('author', editors), self.get('year', 'n.d')]
if genre == 'book':
res.append(self.get_with_translation('booktitle') or
self.get_with_translation('title'))
series = ', '.join(filter(None, [self.get('series'), self.get('volume')]))
if series:
res.append('(%s.)' % series)
elif genre == 'misc':
# in case of misc records, we use the note field in case a title is missing.
res.append(self.get_with_translation('title') or self.get('note'))
else:
res.append(self.get_with_translation('title'))
if genre == 'article':
atom = ' '.join(filter(None, [self.get('journal'), self.get('volume')]))
if self.get('issue'):
atom += '(%s)' % self['issue']
res.append(atom)
res.append(self.get('pages'))
elif genre == 'incollection' or genre == 'inproceedings':
prefix = 'In'
atom = ''
if editors:
atom += editors
if self.get('booktitle'):
if atom:
atom += ','
atom += " %s" % self.get_with_translation('booktitle')
if self.get('pages'):
atom += ", %s" % self['pages']
res.append(prefix + atom)
else:
# check for author to make sure we haven't included the editors yet.
if editors and self.get('author'):
res.append("In %s" % editors)
for attr in [
'journal',
'volume' if genre != 'book' else None,
]:
if attr and self.get(attr):
res.append(self.get(attr))
if self.get('issue'):
res.append("(%s)" % self['issue'])
if not pages_at_end and self.get('pages'):
res.append(self['pages'])
if self.get('publisher'):
res.append(": ".join(filter(None, [self.get('address'), self['publisher']])))
else:
if genre == 'misc' and self.get('howpublished'):
res.append(self.get('howpublished'))
if not thesis and pages_at_end and self.get('pages'):
res.append(self['pages'] + 'pp')
note = self.get('note') or self._genre_note.get(genre)
if note and note not in res:
if thesis:
joiner = ','
if self.get('school'):
note += '{0} {1}'.format(joiner, self.get('school'))
joiner = ';'
if self.get('pages'):
note += '{0} {1}pp.'.format(joiner, self.get('pages'))
res.append('(%s)' % note)
return ' '.join(
x if x.endswith(('.', '.)')) else '%s.' % x for x in res if x)
|
def text(self)
|
Linearize the bib source according to the rules of the unified style.
Book:
author. year. booktitle. (series, volume.) address: publisher.
Article:
author. year. title. journal volume(issue). pages.
Incollection:
author. year. title. In editor (ed.), booktitle, pages. address: publisher.
.. seealso::
http://celxj.org/downloads/UnifiedStyleSheet.pdf
https://github.com/citation-style-language/styles/blob/master/\
unified-style-linguistics.csl
| 3.045892
| 3.01156
| 1.0114
|
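A worked sketch of the article branch above (hypothetical field values; the comment shows the linearized string):

# Fields: author='Meier, Hans', year='2015', title='On Style',
#         journal='Language', volume='91', issue='2', pages='1-10'
# text() joins the pieces and appends missing full stops, yielding roughly:
#   'Meier, Hans. 2015. On Style. Language 91(2). 1-10.'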
response_text = self._get_http_client(type).request(path, method, params)
if not response_text:
return response_text
response_json = json.loads(response_text)
if 'errors' in response_json:
raise (ErrorException([Error().load(e) for e in response_json['errors']]))
return response_json
|
def request(self, path, method='GET', params=None, type=REST_TYPE)
|
Builds a request, gets a response and decodes it.
| 3.520015
| 3.532767
| 0.99639
|
return HLR().load(self.request('hlr', 'POST', {'msisdn': msisdn, 'reference': reference}))
|
def hlr_create(self, msisdn, reference)
|
Perform a new HLR lookup.
| 5.189696
| 4.997866
| 1.038382
|
if params is None: params = {}
if type(recipients) == list:
recipients = ','.join(recipients)
params.update({'originator': originator, 'body': body, 'recipients': recipients})
return Message().load(self.request('messages', 'POST', params))
|
def message_create(self, originator, recipients, body, params=None)
|
Create a new message.
| 3.242727
| 3.14038
| 1.032591
|
if params is None: params = {}
if type(recipients) == list:
recipients = ','.join(recipients)
params.update({'recipients': recipients, 'body': body})
return VoiceMessage().load(self.request('voicemessages', 'POST', params))
|
def voice_message_create(self, recipients, body, params=None)
|
Create a new voice message.
| 3.699452
| 3.635488
| 1.017594
|
if params is None: params = {}
return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))
|
def lookup(self, phonenumber, params=None)
|
Do a new lookup.
| 8.265169
| 8.007254
| 1.03221
|
if params is None: params = {}
return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'GET', params))
|
def lookup_hlr(self, phonenumber, params=None)
|
Retrieve the information of a specific HLR lookup.
| 6.343312
| 4.962026
| 1.278371
|
if params is None: params = {}
return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'POST', params))
|
def lookup_hlr_create(self, phonenumber, params=None)
|
Perform a new HLR lookup.
| 5.702344
| 5.231503
| 1.090001
|
if params is None: params = {}
params.update({'recipient': recipient})
return Verify().load(self.request('verify', 'POST', params))
|
def verify_create(self, recipient, params=None)
|
Create a new verification.
| 6.212423
| 5.289098
| 1.174571
|
return Verify().load(self.request('verify/' + str(id), params={'token': token}))
|
def verify_verify(self, id, token)
|
Verify the token of a specific verification.
| 12.004441
| 9.714843
| 1.23568
|
items = []
for item in value:
items.append(self.itemType().load(item))
self._items = items
|
def items(self, value)
|
Create typed objects from the dicts.
| 5.300447
| 4.605425
| 1.150914
|
if params is None: params = {}
url = urljoin(self.endpoint, path)
headers = {
'Accept': 'application/json',
'Authorization': 'AccessKey ' + self.access_key,
'User-Agent': self.user_agent,
'Content-Type': 'application/json'
}
if method == 'DELETE':
response = requests.delete(url, verify=True, headers=headers, data=json.dumps(params))
elif method == 'GET':
response = requests.get(url, verify=True, headers=headers, params=params)
elif method == 'PATCH':
response = requests.patch(url, verify=True, headers=headers, data=json.dumps(params))
elif method == 'POST':
response = requests.post(url, verify=True, headers=headers, data=json.dumps(params))
elif method == 'PUT':
response = requests.put(url, verify=True, headers=headers, data=json.dumps(params))
else:
raise ValueError(str(method) + ' is not a supported HTTP method')
if response.status_code in self.__supported_status_codes:
response_text = response.text
else:
response.raise_for_status()
return response_text
|
def request(self, path, method='GET', params=None)
|
Builds a request and gets a response.
| 1.613026
| 1.601535
| 1.007175
|
config_h = opj("src", "cysignals", "cysignals_config.h")
if not os.path.isfile(config_h):
import subprocess
subprocess.check_call(["make", "configure"])
subprocess.check_call(["sh", "configure"])
dist = self.distribution
ext_modules = dist.ext_modules
if ext_modules:
dist.ext_modules[:] = self.cythonize(ext_modules)
_build.run(self)
|
def run(self)
|
Run ``./configure`` and Cython first.
| 4.68738
| 3.809688
| 1.230384
|
# Search all subdirectories of sys.path directories for a
# "cython_debug" directory. Note that sys_path is a variable set by
# cysignals-CSI. It may differ from sys.path if GDB is run with a
# different Python interpreter.
files = []
for path in sys_path: # noqa
pattern = os.path.join(path, '*', 'cython_debug', 'cython_debug_info_*')
files.extend(glob.glob(pattern))
return files
|
def cython_debug_files()
|
Cython extra debug information files
| 8.028732
| 7.690939
| 1.043921
|
running = self._webcam.start()
if not running:
return running
running &= self._phoxi.start()
if not running:
self._webcam.stop()
return running
|
def start(self)
|
Start the sensor.
| 6.545506
| 6.461855
| 1.012945
|
# Check that everything is running
if not self._running:
logging.warning('Colorized PhoXi not running. Aborting stop')
return False
self._webcam.stop()
self._phoxi.stop()
return True
|
def stop(self)
|
Stop the sensor.
| 11.482861
| 11.340666
| 1.012538
|
_, phoxi_depth_im, _ = self._phoxi.frames()
webcam_color_im, _, _ = self._webcam.frames(most_recent=True)
# Colorize PhoXi Image
phoxi_color_im = self._colorize(phoxi_depth_im, webcam_color_im)
return phoxi_color_im, phoxi_depth_im, None
|
def frames(self)
|
Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
| 4.793262
| 3.814715
| 1.256519
|
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
median_depth = Image.median_images(depths)
median_depth.data[median_depth.data == 0.0] = fill_depth
return median_depth
|
def median_depth_img(self, num_img=1, fill_depth=0.0)
|
Collect a series of depth images and return the median of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
DepthImage
The median DepthImage collected from the frames.
| 3.2683
| 3.420025
| 0.955636
|
# Project the point cloud into the webcam's frame
target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)
# Sort the points by their distance from the webcam's aperture
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
# Generate image coordinates for each sorted point
icds = self._webcam.color_intrinsics.project(pc_color).data.T
# Create mask for points that are masked by others
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3
# Create mask for points with missing depth or that lie outside the image
valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width),
np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:]
color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame)
|
def _colorize(self, depth_im, color_im)
|
Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
| 2.815706
| 2.757844
| 1.020981
|
sensor_type = sensor_type.lower()
if sensor_type == 'kinect2':
s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
device_num=cfg['device_num'],
frame=cfg['frame'])
elif sensor_type == 'bridged_kinect2':
s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame'])
elif sensor_type == 'primesense':
flip_images = True
if 'flip_images' in cfg.keys():
flip_images = cfg['flip_images']
s = PrimesenseSensor(auto_white_balance=cfg['auto_white_balance'],
flip_images=flip_images,
frame=cfg['frame'])
elif sensor_type == 'virtual':
s = VirtualSensor(cfg['image_dir'],
frame=cfg['frame'])
elif sensor_type == 'tensor_dataset':
s = TensorDatasetVirtualSensor(cfg['dataset_dir'],
frame=cfg['frame'])
elif sensor_type == 'primesense_ros':
s = PrimesenseSensor_ROS(frame=cfg['frame'])
elif sensor_type == 'ensenso':
s = EnsensoSensor(frame=cfg['frame'])
elif sensor_type == 'phoxi':
s = PhoXiSensor(frame=cfg['frame'],
device_name=cfg['device_name'],
size=cfg['size'])
elif sensor_type == 'webcam':
s = WebcamSensor(frame=cfg['frame'],
device_id=cfg['device_id'])
elif sensor_type == 'colorized_phoxi':
s = ColorizedPhoXiSensor(frame=cfg['frame'], phoxi_config=cfg['phoxi_config'],
webcam_config=cfg['webcam_config'], calib_dir=cfg['calib_dir'])
elif sensor_type == 'realsense':
s = RealSenseSensor(
cam_id=cfg['cam_id'],
filter_depth=cfg['filter_depth'],
frame=cfg['frame'],
)
else:
raise ValueError('RGBD sensor type %s not supported' %(sensor_type))
return s
|
def sensor(sensor_type, cfg)
|
Creates a camera sensor of the specified type.
Parameters
----------
sensor_type : :obj:`str`
the type of the sensor (real or virtual)
cfg : :obj:`YamlConfig`
dictionary of parameters for sensor initialization
| 2.534265
| 2.589846
| 0.978539
|
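A minimal usage sketch of the ``sensor`` factory (hypothetical config values, matching the keys read in the webcam branch above):

cfg = {'frame': 'webcam', 'device_id': 0}
cam = sensor('webcam', cfg)
cam.start()
color_im, _, _ = cam.frames()
cam.stop()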
inds = np.where(np.linalg.norm(point - all_points, axis=1) < eps)
if inds[0].shape[0] == 0:
return -1
return inds[0][0]
|
def get_point_index(point, all_points, eps = 1e-4)
|
Get the index of a point in an array
| 2.052876
| 2.029287
| 1.011624
|
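A tiny numeric check for ``get_point_index`` (assuming the function above is in scope):

import numpy as np
pts = np.array([[0., 0., 0.], [1., 0., 0.]])
get_point_index(np.array([1., 0., 0.]), pts)  # -> 1
get_point_index(np.array([5., 0., 0.]), pts)  # -> -1 (no point within eps)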
if not isinstance(source_obj_features, f.BagOfFeatures):
raise ValueError('Must supply source bag of object features')
if not isinstance(target_obj_features, f.BagOfFeatures):
raise ValueError('Must supply target bag of object features')
# source feature descriptors and keypoints
source_descriptors = source_obj_features.descriptors
target_descriptors = target_obj_features.descriptors
source_keypoints = source_obj_features.keypoints
target_keypoints = target_obj_features.keypoints
#calculate distance between this model's descriptors and each of the other_model's descriptors
dists = spatial.distance.cdist(source_descriptors, target_descriptors)
#calculate the indices of the target_model that minimize the distance to the descriptors in this model
source_closest_descriptors = dists.argmin(axis=1)
target_closest_descriptors = dists.argmin(axis=0)
match_indices = []
source_matched_points = np.zeros((0,3))
target_matched_points = np.zeros((0,3))
#calculate which points/indices the closest descriptors correspond to
for i, j in enumerate(source_closest_descriptors):
# for now, only keep correspondences that are a 2-way match
if target_closest_descriptors[j] == i:
match_indices.append(j)
source_matched_points = np.r_[source_matched_points, source_keypoints[i:i+1, :]]
target_matched_points = np.r_[target_matched_points, target_keypoints[j:j+1, :]]
else:
match_indices.append(-1)
return Correspondences(match_indices, source_matched_points, target_matched_points)
|
def match(self, source_obj_features, target_obj_features)
|
Matches features between two graspable objects based on a full distance matrix.
Parameters
----------
source_obj_features : :obj:`BagOfFeatures`
bag of the source objects features
target_obj_features : :obj:`BagOfFeatures`
bag of the target objects features
Returns
-------
corrs : :obj:`Correspondences`
the correspondences between source and target
| 2.726534
| 2.648036
| 1.029644
|
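A toy illustration of the 2-way match rule used above, on a hand-made distance matrix:

import numpy as np
dists = np.array([[0.1, 0.9],
                  [0.8, 0.2],
                  [0.7, 0.3]])
src_closest = dists.argmin(axis=1)  # [0, 1, 1]
tgt_closest = dists.argmin(axis=0)  # [0, 1]
# source 0 <-> target 0 and source 1 <-> target 1 are mutual matches;
# source 2 is not, so match_indices would come out as [0, 1, -1]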
# compute the distances and inner products between the point sets
dists = ssd.cdist(source_points, target_points, 'euclidean')
ip = source_normals.dot(target_normals.T) # abs because we don't have correct orientations
source_ip = source_points.dot(target_normals.T)
target_ip = target_points.dot(target_normals.T)
target_ip = np.diag(target_ip)
target_ip = np.tile(target_ip, [source_points.shape[0], 1])
abs_diff = np.abs(source_ip - target_ip) # difference in inner products
# mark invalid correspondences
invalid_dists = np.where(dists > self.dist_thresh_)
abs_diff[invalid_dists[0], invalid_dists[1]] = np.inf
invalid_norms = np.where(ip < self.norm_thresh_)
abs_diff[invalid_norms[0], invalid_norms[1]] = np.inf
# choose the closest matches
match_indices = np.argmin(abs_diff, axis=1)
match_vals = np.min(abs_diff, axis=1)
invalid_matches = np.where(match_vals == np.inf)
match_indices[invalid_matches[0]] = -1
return NormalCorrespondences(match_indices, source_points, target_points, source_normals, target_normals)
|
def match(self, source_points, target_points, source_normals, target_normals)
|
Matches points between two point-normal sets. Uses the closest inner product to choose matches; distance is used only for thresholding.
Parameters
----------
source_point_cloud : Nx3 :obj:`numpy.ndarray`
source object points
target_point_cloud : Nx3 :obj:`numpy.ndarray`
target object points
source_normal_cloud : Nx3 :obj:`numpy.ndarray`
source object outward-pointing normals
target_normal_cloud : Nx3 :obj:`numpy.ndarray`
target object outward-pointing normals
Returns
-------
:obj:`Correspondences`
the correspondences between source and target
| 2.662332
| 2.658699
| 1.001367
|
self._cfg.enable_device(self.id)
# configure the color stream
self._cfg.enable_stream(
rs.stream.color,
RealSenseSensor.COLOR_IM_WIDTH,
RealSenseSensor.COLOR_IM_HEIGHT,
rs.format.bgr8,
RealSenseSensor.FPS
)
# configure the depth stream
self._cfg.enable_stream(
rs.stream.depth,
RealSenseSensor.DEPTH_IM_WIDTH,
360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT,
rs.format.z16,
RealSenseSensor.FPS
)
|
def _config_pipe(self)
|
Configures the pipeline to stream color and depth.
| 2.886195
| 2.564033
| 1.125647
|
sensor = self._profile.get_device().first_depth_sensor()
self._depth_scale = sensor.get_depth_scale()
|
def _set_depth_scale(self)
|
Retrieve the scale of the depth sensor.
| 4.951019
| 3.307539
| 1.496889
|
strm = self._profile.get_stream(rs.stream.color)
obj = strm.as_video_stream_profile().get_intrinsics()
self._intrinsics[0, 0] = obj.fx
self._intrinsics[1, 1] = obj.fy
self._intrinsics[0, 2] = obj.ppx
self._intrinsics[1, 2] = obj.ppy
|
def _set_intrinsics(self)
|
Read the intrinsics matrix from the stream.
| 2.245258
| 2.073849
| 1.082652
|
return CameraIntrinsics(
self._frame,
self._intrinsics[0, 0],
self._intrinsics[1, 1],
self._intrinsics[0, 2],
self._intrinsics[1, 2],
height=RealSenseSensor.COLOR_IM_HEIGHT,
width=RealSenseSensor.COLOR_IM_WIDTH,
)
|
def color_intrinsics(self)
|
:obj:`CameraIntrinsics` : The camera intrinsics for the RealSense color camera.
| 3.109635
| 2.600459
| 1.195802
|
try:
self._depth_align = False
if self._registration_mode == RealSenseRegistrationMode.DEPTH_TO_COLOR:
self._depth_align = True
self._config_pipe()
self._profile = self._pipe.start(self._cfg)
# store intrinsics and depth scale
self._set_depth_scale()
self._set_intrinsics()
# skip few frames to give auto-exposure a chance to settle
for _ in range(5):
self._pipe.wait_for_frames()
self._running = True
except RuntimeError as e:
print(e)
|
def start(self)
|
Start the sensor.
| 5.438458
| 5.170919
| 1.051739
|
# check that everything is running
if not self._running:
logging.warning('Realsense not running. Aborting stop.')
return False
self._pipe.stop()
self._running = False
return True
|
def stop(self)
|
Stop the sensor.
| 7.371281
| 6.711824
| 1.098253
|
frames = self._pipe.wait_for_frames()
if self._depth_align:
frames = self._align.process(frames)
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
logging.warning('Could not retrieve frames.')
return None, None
if self._filter_depth:
depth_frame = self._filter_depth_frame(depth_frame)
# convert to numpy arrays
depth_image = self._to_numpy(depth_frame, np.float32)
color_image = self._to_numpy(color_frame, np.uint8)
# convert depth to meters
depth_image *= self._depth_scale
# bgr to rgb
color_image = color_image[..., ::-1]
depth = DepthImage(depth_image, frame=self._frame)
color = ColorImage(color_image, frame=self._frame)
return color, depth
|
def _read_color_and_depth_image(self)
|
Read a color and depth image from the device.
| 2.201262
| 2.169411
| 1.014682
|
self._cap = cv2.VideoCapture(self._device_id + cv2.CAP_V4L2)
if not self._cap.isOpened():
self._running = False
self._cap.release()
self._cap = None
return False
self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self._camera_intr.width)
self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self._camera_intr.height)
self._running = True
# Capture 5 frames to flush webcam sensor
for _ in range(5):
_ = self.frames()
return True
|
def start(self)
|
Start the sensor.
| 2.844753
| 2.736228
| 1.039663
|
# Check that everything is running
if not self._running:
logging.warning('Webcam not running. Aborting stop')
return False
if self._cap:
self._cap.release()
self._cap = None
self._running = False
return True
|
def stop(self)
|
Stop the sensor.
| 4.919415
| 4.733197
| 1.039343
|
if most_recent:
for i in range(4):
self._cap.grab()
for i in range(1):
if self._adjust_exposure:
try:
command = 'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'.format(self._device_id)
FNULL = open(os.devnull, 'w')
subprocess.call(shlex.split(command), stdout=FNULL, stderr=subprocess.STDOUT)
except:
pass
ret, frame = self._cap.read()
rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return ColorImage(rgb_data, frame=self._frame), None, None
|
def frames(self, most_recent=False)
|
Retrieve a new frame from the webcam and convert it to a ColorImage,
a DepthImage, and an IrImage.
Parameters
----------
most_recent: bool
If true, the OpenCV buffer is emptied for the webcam before reading the most recent frame.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
| 3.169447
| 3.040336
| 1.042466
|
num_points = msg.height * msg.width
self._format = '<' + num_points * 'ffff'
|
def _set_format(self, msg)
|
Set the buffer formatting.
| 10.109334
| 9.439609
| 1.070948
|
focal_x = msg.K[0]
focal_y = msg.K[4]
center_x = msg.K[2]
center_y = msg.K[5]
im_height = msg.height
im_width = msg.width
self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,
center_x, center_y,
height=im_height,
width=im_width)
|
def _set_camera_properties(self, msg)
|
Set the camera intrinsics from an info msg.
| 2.152053
| 1.9232
| 1.118996
|
# set format
if self._format is None:
self._set_format(msg)
# rescale camera intr in case binning is turned on
if msg.height != self._camera_intr.height:
rescale_factor = float(msg.height) / self._camera_intr.height
self._camera_intr = self._camera_intr.resize(rescale_factor)
# read num points
num_points = msg.height * msg.width
# read buffer
raw_tup = struct.Struct(self._format).unpack_from(msg.data, 0)
raw_arr = np.array(raw_tup)
# subsample depth values and reshape
depth_ind = 2 + 4 * np.arange(num_points)
depth_buf = raw_arr[depth_ind]
depth_arr = depth_buf.reshape(msg.height, msg.width)
depth_im = DepthImage(depth_arr, frame=self._frame)
return depth_im
|
def _depth_im_from_pointcloud(self, msg)
|
Convert a pointcloud2 message to a depth image.
| 3.532671
| 3.390183
| 1.04203
|
# initialize subscribers
self._pointcloud_sub = rospy.Subscriber('/%s/depth/points' %(self.frame), PointCloud2, self._pointcloud_callback)
self._camera_info_sub = rospy.Subscriber('/%s/left/camera_info' %(self.frame), CameraInfo, self._camera_info_callback)
while self._camera_intr is None:
time.sleep(0.1)
self._running = True
|
def start(self)
|
Start the sensor
| 3.458929
| 3.530439
| 0.979745
|
# check that everything is running
if not self._running:
logging.warning('Ensenso not running. Aborting stop')
return False
# stop subs
self._pointcloud_sub.unregister()
self._camera_info_sub.unregister()
self._running = False
return True
|
def stop(self)
|
Stop the sensor
| 7.836547
| 7.234614
| 1.083202
|
# wait for a new image
while self._cur_depth_im is None:
time.sleep(0.01)
# read next image
depth_im = self._cur_depth_im
color_im = ColorImage(np.zeros([depth_im.height,
depth_im.width,
3]).astype(np.uint8), frame=self._frame)
self._cur_depth_im = None
return color_im, depth_im, None
|
def frames(self)
|
Retrieve a new frame from the Ensenso and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
Raises
------
RuntimeError
If the Ensenso stream is not running.
| 3.853102
| 3.917681
| 0.983516
|
pass
|
def register(self, source_point_cloud, target_point_cloud,
source_normal_cloud, target_normal_cloud, matcher,
num_iterations=1, compute_total_cost=True, match_centroids=False,
vis=False)
|
Iteratively register objects to one another.
Parameters
----------
source_point_cloud : :obj:`autolab_core.PointCloud`
source object points
target_point_cloud : :obj`autolab_core.PointCloud`
target object points
source_normal_cloud : :obj:`autolab_core.NormalCloud`
source object outward-pointing normals
target_normal_cloud : :obj:`autolab_core.NormalCloud`
target object outward-pointing normals
matcher : :obj:`PointToPlaneFeatureMatcher`
object to match the point sets
num_iterations : int
the number of iterations to run
compute_total_cost : bool
whether or not to compute the total cost upon termination.
match_centroids : bool
whether or not to match the centroids of the point clouds
Returns
-------
:obj`RegistrationResult`
results containing source to target transformation and cost
| 5167.578613
| 17300.777344
| 0.298691
|
if not self._running:
raise RuntimeError('Device pointing to %s not running. Cannot read frames' %(self._path_to_images))
if self._im_index >= self._num_images:
raise RuntimeError('Device is out of images')
# read images
color_filename = os.path.join(self._path_to_images, 'color_%d%s' %(self._im_index, self._color_ext))
color_im = ColorImage.open(color_filename, frame=self._frame)
depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %(self._im_index))
depth_im = DepthImage.open(depth_filename, frame=self._frame)
self._im_index = (self._im_index + 1) % self._num_images
return color_im, depth_im, None
|
def frames(self)
|
Retrieve the next frame from the image directory and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
Raises
------
RuntimeError
If the stream is not running or if all images in the
directory have been used.
| 3.183703
| 2.681106
| 1.187459
|
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
return Image.median_images(depths)
|
def median_depth_img(self, num_img=1)
|
Collect a series of depth images and return the median of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The median DepthImage collected from the frames.
| 5.562636
| 6.022546
| 0.923635
|
if not self._running:
raise RuntimeError('Device pointing to %s not running. Cannot read frames' %(self._path_to_images))
if self._im_index >= self._num_images:
raise RuntimeError('Device is out of images')
# read images
datapoint = self._dataset.datapoint(self._im_index,
TensorDatasetVirtualSensor.IMAGE_FIELDS)
color_im = ColorImage(datapoint[TensorDatasetVirtualSensor.COLOR_IM_FIELD],
frame=self._frame)
depth_im = DepthImage(datapoint[TensorDatasetVirtualSensor.DEPTH_IM_FIELD],
frame=self._frame)
if self._image_rescale_factor != 1.0:
color_im = color_im.resize(self._image_rescale_factor)
depth_im = depth_im.resize(self._image_rescale_factor, interp='nearest')
self._im_index = (self._im_index + 1) % self._num_images
return color_im, depth_im, None
|
def frames(self)
|
Retrieve the next frame from the tensor dataset and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
Raises
------
RuntimeError
If the stream is not running or if all images in the
directory have been used.
| 3.650934
| 3.151394
| 1.158514
|
return CameraIntrinsics(self._ir_frame, PrimesenseSensor.FOCAL_X, PrimesenseSensor.FOCAL_Y,
PrimesenseSensor.CENTER_X, PrimesenseSensor.CENTER_Y,
height=PrimesenseSensor.DEPTH_IM_HEIGHT,
width=PrimesenseSensor.DEPTH_IM_WIDTH)
|
def color_intrinsics(self)
|
:obj:`CameraIntrinsics` : The camera intrinsics for the primesense color camera.
| 4.334641
| 3.602277
| 1.203306
|
# open device
openni2.initialize(PrimesenseSensor.OPENNI2_PATH)
self._device = openni2.Device.open_any()
# open depth stream
self._depth_stream = self._device.create_depth_stream()
self._depth_stream.configure_mode(PrimesenseSensor.DEPTH_IM_WIDTH,
PrimesenseSensor.DEPTH_IM_HEIGHT,
PrimesenseSensor.FPS,
openni2.PIXEL_FORMAT_DEPTH_1_MM)
self._depth_stream.start()
# open color stream
self._color_stream = self._device.create_color_stream()
self._color_stream.configure_mode(PrimesenseSensor.COLOR_IM_WIDTH,
PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.FPS,
openni2.PIXEL_FORMAT_RGB888)
self._color_stream.camera.set_auto_white_balance(self._auto_white_balance)
self._color_stream.camera.set_auto_exposure(self._auto_exposure)
self._color_stream.start()
# configure device
if self._registration_mode == PrimesenseRegistrationMode.DEPTH_TO_COLOR:
self._device.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
else:
self._device.set_image_registration_mode(openni2.IMAGE_REGISTRATION_OFF)
self._device.set_depth_color_sync_enabled(self._enable_depth_color_sync)
self._running = True
|
def start(self)
|
Start the sensor
| 2.232473
| 2.230704
| 1.000793
|
# check that everything is running
if not self._running or self._device is None:
logging.warning('Primesense not running. Aborting stop')
return False
# stop streams
if self._depth_stream:
self._depth_stream.stop()
if self._color_stream:
self._color_stream.stop()
self._running = False
# Unload openni2
openni2.unload()
return True
|
def stop(self)
|
Stop the sensor
| 4.639801
| 4.430437
| 1.047256
|
# read raw uint16 buffer
im_arr = self._depth_stream.read_frame()
raw_buf = im_arr.get_buffer_as_uint16()
buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.DEPTH_IM_WIDTH * PrimesenseSensor.DEPTH_IM_HEIGHT)])
# convert to image in meters
depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,
PrimesenseSensor.DEPTH_IM_WIDTH)
depth_image = depth_image * MM_TO_METERS # convert to meters
if self._flip_images:
depth_image = np.flipud(depth_image)
else:
depth_image = np.fliplr(depth_image)
return DepthImage(depth_image, frame=self._frame)
|
def _read_depth_image(self)
|
Reads a depth image from the device
| 3.461277
| 3.47287
| 0.996662
|
# read raw buffer
im_arr = self._color_stream.read_frame()
raw_buf = im_arr.get_buffer_as_triplet()
r_array = np.array([raw_buf[i][0] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
# convert to uint8 image
color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH, 3])
color_image[:,:,0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
color_image[:,:,1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
color_image[:,:,2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
if self._flip_images:
color_image = np.flipud(color_image.astype(np.uint8))
else:
color_image = np.fliplr(color_image.astype(np.uint8))
return ColorImage(color_image, frame=self._frame)
|
def _read_color_image(self)
|
Reads a color image from the device
| 1.911005
| 1.930692
| 0.989803
|
color_im = self._read_color_image()
depth_im = self._read_depth_image()
return color_im, depth_im, None
|
def frames(self)
|
Retrieve a new frame from the PrimeSense and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
Raises
------
RuntimeError
If the PrimeSense stream is not running.
| 4.176788
| 5.031694
| 0.830096
|
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
return Image.min_images(depths)
|
def min_depth_img(self, num_img=1)
|
Collect a series of depth images and return the min of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The min DepthImage collected from the frames.
| 5.652231
| 6.335126
| 0.892205
|
rospy.wait_for_service(stream_buffer, timeout = self.timeout)
ros_image_buffer = rospy.ServiceProxy(stream_buffer, ImageBuffer)
ret = ros_image_buffer(number, 1)
if staleness_limit is not None:
if ret.timestamps[-1] > staleness_limit:
raise RuntimeError("Got data {0} seconds old, more than allowed {1} seconds"
.format(ret.timestamps[-1], staleness_limit))
data = ret.data.reshape(ret.data_dim1, ret.data_dim2, ret.data_dim3).astype(ret.dtype)
# Special handling for 1 element, since dstack's behavior is different
if number == 1:
return [data]
return np.dsplit(data, number)
|
def _ros_read_images(self, stream_buffer, number, staleness_limit = 10.)
|
Reads images from a stream buffer
Parameters
----------
stream_buffer : string
absolute path to the image buffer service
number : int
The number of frames to get. Must be less than the image buffer service's
current buffer size
staleness_limit : float, optional
Max value of how many seconds old the oldest image is. If the oldest image
grabbed is older than this value, a RuntimeError is thrown.
If None, staleness is ignored.
Returns
-------
List of numpy.ndarray objects, each one an image
Images are in reverse chronological order (newest first)
| 3.77023
| 3.470445
| 1.086382
|
depth_images = self._ros_read_images(self._depth_image_buffer, num_images, self.staleness_limit)
for i in range(0, num_images):
depth_images[i] = depth_images[i] * MM_TO_METERS # convert to meters
if self._flip_images:
depth_images[i] = np.flipud(depth_images[i])
depth_images[i] = np.fliplr(depth_images[i])
depth_images[i] = DepthImage(depth_images[i], frame=self._frame)
return depth_images
|
def _read_depth_images(self, num_images)
|
Reads depth images from the device
| 2.940789
| 2.902262
| 1.013275
|
color_images = self._ros_read_images(self._color_image_buffer, num_images, self.staleness_limit)
for i in range(0, num_images):
if self._flip_images:
color_images[i] = np.flipud(color_images[i].astype(np.uint8))
color_images[i] = np.fliplr(color_images[i].astype(np.uint8))
color_images[i] = ColorImage(color_images[i], frame=self._frame)
return color_images
|
def _read_color_images(self, num_images)
|
Reads color images from the device
| 2.879018
| 2.850736
| 1.009921
|
depths = self._read_depth_images(num_img)
median_depth = Image.median_images(depths)
median_depth.data[median_depth.data == 0.0] = fill_depth
return median_depth
|
def median_depth_img(self, num_img=1, fill_depth=0.0)
|
Collect a series of depth images and return the median of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The median DepthImage collected from the frames.
| 3.519611
| 4.111837
| 0.85597
|
depths = self._read_depth_images(num_img)
return Image.min_images(depths)
|
def min_depth_img(self, num_img=1)
|
Collect a series of depth images and return the min of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The min DepthImage collected from the frames.
| 9.260179
| 14.15816
| 0.654052
|
if self.stable_pose is None:
T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
else:
T_obj_world = self.stable_pose.T_obj_table.as_frames('obj', 'world')
T_camera_obj = T_obj_world.inverse() * self.T_camera_world
return T_camera_obj
|
def T_obj_camera(self)
|
Returns the transformation from camera to object when the object is in the given stable pose.
Returns
-------
:obj:`autolab_core.RigidTransform`
The desired transform.
| 3.423125
| 3.060568
| 1.118461
|
if render_mode == RenderMode.COLOR:
return self.color_im
elif render_mode == RenderMode.DEPTH:
return self.depth_im
elif render_mode == RenderMode.SEGMASK:
return self.binary_im
else:
return None
|
def image(self, render_mode)
|
Return an image generated with a particular render mode.
Parameters
----------
render_mode : :obj:`RenderMode`
The type of image we want.
Returns
-------
:obj:`Image`
The color, depth, or binary image if render_mode is
COLOR, DEPTH, or SEGMASK respectively.
| 2.556269
| 2.2671
| 1.12755
|
self.features_.append(feature)
self.num_features_ = len(self.features_)
|
def add(self, feature)
|
Add a new feature to the bag.
Parameters
----------
feature : :obj:`Feature`
feature to add
| 4.100671
| 5.454941
| 0.751735
|
self.features_.extend(features)
self.num_features_ = len(self.features_)
|
def extend(self, features)
|
Add a list of features to the bag.
Parameters
----------
features : :obj:`list` of :obj:`Feature`
features to add
| 4.448814
| 5.884033
| 0.756083
|
if index < 0 or index >= self.num_features_:
raise ValueError('Index %d out of range' %(index))
return self.features_[index]
|
def feature(self, index)
|
Returns a feature.
Parameters
----------
index : int
index of feature in list
Returns
-------
:obj:`Feature`
| 3.647671
| 4.757985
| 0.766642
|
if isinstance(indices, np.ndarray):
indices = indices.tolist()
if not isinstance(indices, list):
raise ValueError('Can only index with lists')
return [self.features_[i] for i in indices]
|
def feature_subset(self, indices)
|
Returns some subset of the features.
Parameters
----------
indices : :obj:`list` of :obj:`int`
indices of the features in the list
Returns
-------
:obj:`list` of :obj:`Feature`
| 3.477026
| 4.157506
| 0.836325
|
if rospy.get_name() == '/unnamed':
raise ValueError('Weight sensor must be run inside a ros node!')
self._weight_subscriber = rospy.Subscriber('weight_sensor/weights', Float32MultiArray, self._weights_callback)
self._running = True
|
def start(self)
|
Start the sensor.
| 7.403107
| 6.389857
| 1.158572
|
if not self._running:
return
self._weight_subscriber.unregister()
self._running = False
|
def stop(self)
|
Stop the sensor.
| 8.694901
| 7.560225
| 1.150085
|
weights = self._raw_weights()
if weights.shape[1] == 0:
return 0.0
elif weights.shape[1] < self._ntaps:
return np.sum(np.mean(weights, axis=1))
else:
return self._filter_coeffs.dot(np.sum(weights, axis=0))
|
def total_weight(self)
|
Read a weight from the sensor in grams.
Returns
-------
weight : float
The sensor weight in grams.
| 4.272283
| 4.652349
| 0.918307
|
weights = self._raw_weights()
if weights.shape[1] == 0:
return np.zeros(weights.shape[0])
elif weights.shape[1] < self._ntaps:
return np.mean(weights, axis=1)
else:
return weights.dot(self._filter_coeffs)
|
def individual_weights(self)
|
Read individual weights from the load cells in grams.
Returns
-------
weight : float
The sensor weight in grams.
| 3.833104
| 4.536433
| 0.84496
|
if self._debug:
return np.array([[],[],[],[]])
if not self._running:
raise ValueError('Weight sensor is not running!')
if len(self._weight_buffers) == 0:
time.sleep(0.3)
if len(self._weight_buffers) == 0:
raise ValueError('Weight sensor is not retrieving data!')
weights = np.array(self._weight_buffers)
return weights
|
def _raw_weights(self)
|
Create a numpy array containing the raw sensor weights.
| 3.984508
| 3.398856
| 1.172308
|
# Read weights
weights = np.array(msg.data)
# If needed, initialize indiv_weight_buffers
if len(self._weight_buffers) == 0:
self._weight_buffers = [[] for i in range(len(weights))]
# Record individual weights
for i, w in enumerate(weights):
if len(self._weight_buffers[i]) == self._ntaps:
self._weight_buffers[i].pop(0)
self._weight_buffers[i].append(w)
|
def _weights_callback(self, msg)
|
Callback for recording weights from sensor.
| 3.226987
| 3.206011
| 1.006543
|
return np.r_[self.fx, self.fy, self.cx, self.cy, self.skew, self.height, self.width]
|
def vec(self)
|
:obj:`numpy.ndarray` : Vector representation for this camera.
| 4.997092
| 3.292459
| 1.517739
|
from sensor_msgs.msg import CameraInfo, RegionOfInterest
from std_msgs.msg import Header
msg_header = Header()
msg_header.frame_id = self._frame
msg_roi = RegionOfInterest()
msg_roi.x_offset = 0
msg_roi.y_offset = 0
msg_roi.height = 0
msg_roi.width = 0
msg_roi.do_rectify = 0
msg = CameraInfo()
msg.header = msg_header
msg.height = self._height
msg.width = self._width
msg.distortion_model = 'plumb_bob'
msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
msg.K = [self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0]
msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
msg.P = [self._fx, 0.0, self._cx, 0.0, 0.0, self._fx, self._cy, 0.0, 0.0, 0.0, 1.0, 0.0]
msg.binning_x = 0
msg.binning_y = 0
msg.roi = msg_roi
return msg
|
def rosmsg(self)
|
:obj:`sensor_msgs.CameraInfo` : Returns ROS CameraInfo msg
| 1.379645
| 1.375922
| 1.002706
|
cx = self.cx + float(width-1)/2 - crop_cj
cy = self.cy + float(height-1)/2 - crop_ci
cropped_intrinsics = CameraIntrinsics(frame=self.frame,
fx=self.fx,
fy=self.fy,
skew=self.skew,
cx=cx, cy=cy,
height=height,
width=width)
return cropped_intrinsics
|
def crop(self, height, width, crop_ci, crop_cj)
|
Convert to new camera intrinsics for crop of image from original camera.
Parameters
----------
height : int
height of crop window
width : int
width of crop window
crop_ci : int
row of crop window center
crop_cj : int
col of crop window center
Returns
-------
:obj:`CameraIntrinsics`
camera intrinsics for cropped window
| 2.839606
| 2.665865
| 1.065173
|
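A short sketch of ``crop`` (assuming ``intr`` is an existing CameraIntrinsics for a 480x640 camera):

cropped = intr.crop(height=100, width=100, crop_ci=240, crop_cj=320)
# the principal point shifts into the crop window:
#   cropped.cx == intr.cx + (100 - 1) / 2.0 - 320
#   cropped.cy == intr.cy + (100 - 1) / 2.0 - 240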
center_x = float(self.width-1) / 2
center_y = float(self.height-1) / 2
orig_cx_diff = self.cx - center_x
orig_cy_diff = self.cy - center_y
height = scale * self.height
width = scale * self.width
scaled_center_x = float(width-1) / 2
scaled_center_y = float(height-1) / 2
fx = scale * self.fx
fy = scale * self.fy
skew = scale * self.skew
cx = scaled_center_x + scale * orig_cx_diff
cy = scaled_center_y + scale * orig_cy_diff
scaled_intrinsics = CameraIntrinsics(frame=self.frame,
fx=fx, fy=fy, skew=skew, cx=cx, cy=cy,
height=height, width=width)
return scaled_intrinsics
|
def resize(self, scale)
|
Convert to new camera intrinsics with parameters for resized image.
Parameters
----------
scale : float
the amount to rescale the intrinsics
Returns
-------
:obj:`CameraIntrinsics`
camera intrinsics for resized image
| 2.136648
| 2.047278
| 1.043653
|
if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):
raise ValueError('Must provide PointCloud or 3D Point object for projection')
if point_cloud.frame != self._frame:
raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))
points_proj = self._K.dot(point_cloud.data)
if len(points_proj.shape) == 1:
points_proj = points_proj[:, np.newaxis]
point_depths = np.tile(points_proj[2,:], [3, 1])
points_proj = np.divide(points_proj, point_depths)
if round_px:
points_proj = np.round(points_proj)
if isinstance(point_cloud, Point):
return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)
return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame)
|
def project(self, point_cloud, round_px=True)
|
Projects a point cloud onto the camera image plane.
Parameters
----------
point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point`
A PointCloud or Point to project onto the camera image plane.
round_px : bool
If True, projections are rounded to the nearest pixel.
Returns
-------
:obj:`autolab_core.ImageCoords` or :obj:`autolab_core.Point`
A corresponding set of image coordinates representing the given
PointCloud's projections onto the camera image plane. If the input
was a single Point, returns a 2D Point in the camera plane.
Raises
------
ValueError
If the input is not a PointCloud or Point in the same reference
frame as the camera.
| 2.687979
| 2.37137
| 1.133513
|
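A brief sketch of ``project`` (assuming ``ci`` is a CameraIntrinsics and ``Point`` is in scope):

import numpy as np
p = Point(data=np.array([0., 0., 1.]), frame=ci.frame)
uv = ci.project(p)
# a point on the optical axis at depth 1 projects to the principal point,
# i.e. roughly (ci.cx, ci.cy) after rounding to integer pixels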
point_cloud = self.deproject(depth_image)
point_cloud_im_data = point_cloud.data.T.reshape(depth_image.height, depth_image.width, 3)
return PointCloudImage(data=point_cloud_im_data,
frame=self._frame)
|
def deproject_to_image(self, depth_image)
|
Deprojects a DepthImage into a PointCloudImage.
Parameters
----------
depth_image : :obj:`DepthImage`
The 2D depth image to project into a point cloud.
Returns
-------
:obj:`PointCloudImage`
A point cloud image created from the depth image.
Raises
------
ValueError
If depth_image is not a valid DepthImage in the same reference frame
as the camera.
| 3.556453
| 3.842339
| 0.925596
|
file_root, file_ext = os.path.splitext(filename)
if file_ext.lower() != INTR_EXTENSION:
raise ValueError('Extension %s not supported for CameraIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION))
f = open(filename, 'r')
ci = json.load(f)
f.close()
return CameraIntrinsics(frame=ci['_frame'],
fx=ci['_fx'],
fy=ci['_fy'],
cx=ci['_cx'],
cy=ci['_cy'],
skew=ci['_skew'],
height=ci['_height'],
width=ci['_width'])
|
def load(filename)
|
Load a CameraIntrinsics object from a file.
Parameters
----------
filename : :obj:`str`
The .intr file to load the object from.
Returns
-------
:obj:`CameraIntrinsics`
The CameraIntrinsics object loaded from the file.
Raises
------
ValueError
If filename does not have the .intr extension.
| 3.018489
| 2.795688
| 1.079695
|
if 'prestored_data' in cfg.keys() and cfg['prestored_data'] == 1:
sensor = VirtualKinect2Sensor(path_to_images=cfg['prestored_data_dir'], frame=cfg['sensor']['frame'])
else:
sensor = Kinect2Sensor(device_num=cfg['sensor']['device_num'], frame=cfg['sensor']['frame'],
packet_pipeline_mode=cfg['sensor']['pipeline_mode'])
sensor.start()
ir_intrinsics = sensor.ir_intrinsics
# get raw images
colors = []
depths = []
for _ in range(cfg['num_images']):
color, depth, _ = sensor.frames()
colors.append(color)
depths.append(depth)
sensor.stop()
return colors, depths, ir_intrinsics
|
def load_images(cfg)
|
Helper function for loading a set of color images, depth images, and IR
camera intrinsics.
The config dictionary must have these keys:
- prestored_data -- If 1, use the virtual sensor, else use a real sensor.
- prestored_data_dir -- A path to the prestored data dir for a virtual sensor.
- sensor/frame -- The frame of reference for the sensor.
- sensor/device_num -- The device number for the real Kinect.
- sensor/pipeline_mode -- The mode for the real Kinect's packet pipeline.
- num_images -- The number of images to generate.
Parameters
----------
cfg : :obj:`dict`
A config dictionary.
Returns
-------
:obj:`tuple` of :obj:`list` of :obj:`ColorImage`, :obj:`list` of :obj:`DepthImage`, :obj:`CameraIntrinsics`
A set of ColorImages and DepthImages, and the Kinect's CameraIntrinsics
for its IR sensor.
| 3.794122
| 2.286277
| 1.65952
|
if self._device is None:
raise RuntimeError('Kinect2 device %s not running. Cannot return color intrinsics' %(self._device_num))
camera_params = self._device.getColorCameraParams()
return CameraIntrinsics(self._color_frame, camera_params.fx, camera_params.fy,
camera_params.cx, camera_params.cy)
|
def color_intrinsics(self)
|
:obj:`CameraIntrinsics` : The camera intrinsics for the Kinect's color camera.
| 5.081311
| 4.291388
| 1.184072
|
if self._device is None:
raise RuntimeError('Kinect2 device %s not running. Cannot return IR intrinsics' %(self._device_num))
camera_params = self._device.getIrCameraParams()
return CameraIntrinsics(self._ir_frame, camera_params.fx, camera_params.fy,
camera_params.cx, camera_params.cy,
height=Kinect2Sensor.DEPTH_IM_HEIGHT,
width=Kinect2Sensor.DEPTH_IM_WIDTH)
|
def ir_intrinsics(self)
|
:obj:`CameraIntrinsics` : The camera intrinsics for the Kinect's IR camera.
| 5.113207
| 4.241294
| 1.205577
|
# open packet pipeline
if self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL:
self._pipeline = lf2.OpenGLPacketPipeline()
elif self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU:
self._pipeline = lf2.CpuPacketPipeline()
# setup logger
self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)
lf2.setGlobalLogger(self._logger)
# check devices
self._fn_handle = lf2.Freenect2()
self._num_devices = self._fn_handle.enumerateDevices()
if self._num_devices == 0:
raise IOError('Failed to start stream. No Kinect2 devices available!')
if self._num_devices <= self._device_num:
raise IOError('Failed to start stream. Device num %d unavailable!' %(self._device_num))
# open device
self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)
self._device = self._fn_handle.openDevice(self._serial, pipeline=self._pipeline)
# add device sync modes
self._listener = lf2.SyncMultiFrameListener(
lf2.FrameType.Color | lf2.FrameType.Ir | lf2.FrameType.Depth)
self._device.setColorFrameListener(self._listener)
self._device.setIrAndDepthFrameListener(self._listener)
# start device
self._device.start()
# open registration
self._registration = None
if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
logging.debug('Using color to depth registration')
self._registration = lf2.Registration(self._device.getIrCameraParams(),
self._device.getColorCameraParams())
self._running = True
|
def start(self)
|
Starts the Kinect v2 sensor stream.
Raises
------
IOError
If the Kinect v2 is not detected.
| 3.303367
| 3.284191
| 1.005839
|
# check that everything is running
if not self._running or self._device is None:
logging.warning('Kinect2 device %d not running. Aborting stop' %(self._device_num))
return False
# stop the device
self._device.stop()
self._device.close()
self._device = None
self._running = False
return True
|
def stop(self)
|
Stops the Kinect2 sensor stream.
Returns
-------
bool
True if the stream was stopped, False if the device was already
stopped or was not otherwise available.
| 4.842424
| 4.306726
| 1.124386
|
color_im, depth_im, ir_im, _ = self._frames_and_index_map(skip_registration=skip_registration)
return color_im, depth_im, ir_im
|
def frames(self, skip_registration=False)
|
Retrieve a new frame from the Kinect and convert it to a ColorImage,
a DepthImage, and an IrImage.
Parameters
----------
skip_registration : bool
If True, the registration step is skipped.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
Raises
------
RuntimeError
If the Kinect stream is not running.
| 4.674322
| 4.612886
| 1.013318
|
if not self._running:
raise RuntimeError('Kinect2 device %s not running. Cannot read frames' %(self._device_num))
# read frames
frames = self._listener.waitForNewFrame()
unregistered_color = frames['color']
distorted_depth = frames['depth']
ir = frames['ir']
# apply color to depth registration
color_frame = self._color_frame
color = unregistered_color
depth = distorted_depth
color_depth_map = np.zeros([depth.height, depth.width]).astype(np.int32).ravel()
if not skip_registration and self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
color_frame = self._ir_frame
depth = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Depth)
color = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Color)
self._registration.apply(unregistered_color, distorted_depth, depth, color, color_depth_map=color_depth_map)
# convert to array (copy needed to prevent reference of deleted data)
color_arr = copy.copy(color.asarray())
color_arr[:,:,[0,2]] = color_arr[:,:,[2,0]] # convert BGR to RGB
color_arr = np.fliplr(color_arr) # flip all four channels horizontally
depth_arr = np.fliplr(copy.copy(depth.asarray()))
ir_arr = np.fliplr(copy.copy(ir.asarray()))
# convert meters
if self._depth_mode == Kinect2DepthMode.METERS:
depth_arr = depth_arr * MM_TO_METERS
# Release and return
self._listener.release(frames)
return (ColorImage(color_arr[:,:,:3], color_frame),
DepthImage(depth_arr, self._ir_frame),
IrImage(ir_arr.astype(np.uint16), self._ir_frame),
color_depth_map)
|
def _frames_and_index_map(self, skip_registration=False)
|
Retrieve a new frame from the Kinect and return a ColorImage,
DepthImage, IrImage, and a map from depth pixels to color pixel indices.
Parameters
----------
skip_registration : bool
If True, the registration step is skipped.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame, and an
ndarray that maps pixels of the depth image to the index of the
corresponding pixel in the color image.
Raises
------
RuntimeError
If the Kinect stream is not running.
| 2.990165
| 2.7951
| 1.069788
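A hedged sketch of consuming the depth-to-color index map described above. The map is pre-initialized to zeros and only filled when registration is applied, and it is computed before the horizontal flip of the returned arrays; depth_im.width as an attribute is an assumption about the Image class.
import numpy as np

# sketch: the fourth return value maps each flattened depth pixel to the
# index of its corresponding color pixel (all zeros if registration was skipped)
color_im, depth_im, ir_im, color_depth_map = sensor._frames_and_index_map()
matched = np.count_nonzero(color_depth_map)
print('%d of %d depth pixels have a color correspondence' % (matched, color_depth_map.size))
row, col = 200, 256                      # arbitrary depth pixel
flat_color_index = color_depth_map[row * depth_im.width + col]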
|
encoding = msg.encoding
try:
image = self._bridge.imgmsg_to_cv2(msg, encoding)
except CvBridgeError as e:
rospy.logerr(e)
raise
return image
|
def _process_image_msg(self, msg)
|
Process an image message and return a numpy array with the image data.
Returns
-------
:obj:`numpy.ndarray` containing the image in the image message
Raises
------
CvBridgeError
If the bridge is not able to convert the image
| 2.624664
| 3.124975
| 0.839899
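For context, a standalone sketch of the same cv_bridge conversion pattern wired into a plain rospy subscriber; the node and topic names are placeholders, not values taken from this class.
# standalone cv_bridge sketch; '/camera/rgb/image_raw' is a placeholder topic
import rospy
import sensor_msgs.msg
from cv_bridge import CvBridge, CvBridgeError

bridge = CvBridge()

def on_image(msg):
    try:
        arr = bridge.imgmsg_to_cv2(msg, msg.encoding)
    except CvBridgeError as e:
        rospy.logerr(e)
        return
    rospy.loginfo('received %dx%d image' % (arr.shape[1], arr.shape[0]))

rospy.init_node('image_listener')
rospy.Subscriber('/camera/rgb/image_raw', sensor_msgs.msg.Image, on_image)
rospy.spin()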
|
color_arr = self._process_image_msg(image_msg)
self._cur_color_im = ColorImage(color_arr[:,:,::-1], self._frame)
|
def _color_image_callback(self, image_msg)
|
Callback for the color image topic; keeps the current color image up to date.
| 6.693361
| 6.333189
| 1.056871
|
encoding = image_msg.encoding
try:
depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)
except CvBridgeError as e:
rospy.logerr(e)
return
depth = np.array(depth_arr * MM_TO_METERS, np.float32)
self._cur_depth_im = DepthImage(depth, self._frame)
|
def _depth_image_callback(self, image_msg)
|
Callback for the depth image topic; keeps the current depth image up to date.
| 3.521926
| 3.51416
| 1.00221
|
# initialize subscribers
self._image_sub = rospy.Subscriber(self.topic_image_color, sensor_msgs.msg.Image, self._color_image_callback)
self._depth_sub = rospy.Subscriber(self.topic_image_depth, sensor_msgs.msg.Image, self._depth_image_callback)
self._camera_info_sub = rospy.Subscriber(self.topic_info_camera, sensor_msgs.msg.CameraInfo, self._camera_info_callback)
timeout = 10
try:
rospy.loginfo("waiting to recieve a message from the Kinect")
rospy.wait_for_message(self.topic_image_color, sensor_msgs.msg.Image, timeout=timeout)
rospy.wait_for_message(self.topic_image_depth, sensor_msgs.msg.Image, timeout=timeout)
rospy.wait_for_message(self.topic_info_camera, sensor_msgs.msg.CameraInfo, timeout=timeout)
except rospy.ROSException as e:
print("KINECT NOT FOUND")
rospy.logerr("Kinect topic not found, Kinect not started")
rospy.logerr(e)
while self._camera_intr is None:
time.sleep(0.1)
self._running = True
|
def start(self)
|
Start the sensor by subscribing to the ROS color, depth, and camera info topics.
| 2.203489
| 2.207549
| 0.998161
|
# check that everything is running
if not self._running:
logging.warning('Kinect not running. Aborting stop')
return False
# stop subs
self._image_sub.unregister()
self._depth_sub.unregister()
self._camera_info_sub.unregister()
self._running = False
return True
|
def stop(self)
|
Stop the sensor by unregistering the ROS subscribers.
Returns
-------
bool
True if the stream was stopped, False if the sensor was not running.
| 5.282899
| 5.026667
| 1.050975
|
# wait for a new image
while self._cur_depth_im is None or self._cur_color_im is None:
time.sleep(0.01)
# read next image
depth_im = self._cur_depth_im
color_im = self._cur_color_im
self._cur_color_im = None
self._cur_depth_im = None
# TODO: add IR image
return color_im, depth_im, None
|
def frames(self)
|
Retrieve a new frame from the bridged Kinect and convert it to a ColorImage
and a DepthImage; the IrImage is always None for this sensor type.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`
The ColorImage and DepthImage of the current frame, with None in place of the IrImage.
Raises
------
RuntimeError
If the Kinect stream is not running.
| 3.632915
| 3.307828
| 1.098278
|
if not self._running:
raise RuntimeError('VirtualKinect2 device pointing to %s not running. Cannot read frames' %(self._path_to_images))
if self._im_index > self._num_images:
raise RuntimeError('VirtualKinect2 device is out of images')
# read images
color_filename = os.path.join(self._path_to_images, 'color_%d.png' %(self._im_index))
color_im = ColorImage.open(color_filename, frame=self._frame)
depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %(self._im_index))
depth_im = DepthImage.open(depth_filename, frame=self._frame)
ir_filename = os.path.join(self._path_to_images, 'ir_%d.npy' %(self._im_index))
ir_im = None
if os.path.exists(ir_filename):
ir_im = IrImage.open(ir_filename, frame=self._frame)
self._im_index += 1
return color_im, depth_im, ir_im
|
def frames(self)
|
Retrieve the next frame from the image directory and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`
The ColorImage, DepthImage, and IrImage of the current frame; the IrImage is
None if no ir file exists for the current index.
Raises
------
RuntimeError
If the Kinect stream is not running or if all images in the
directory have been used.
| 2.526642
| 2.182212
| 1.157835
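The virtual sensor above reads numbered files from a directory. A hedged sketch of producing such a directory from a live sensor follows; the output path is a placeholder, live_sensor stands in for a started Kinect2Sensor, and the save() calls on the image objects are assumptions about the Image classes used here.
import os

# sketch: record frames in the layout the virtual sensor expects
# (color_<i>.png, depth_<i>.npy, optional ir_<i>.npy)
out_dir = '/tmp/kinect_frames'           # placeholder path
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
for i in range(10):
    color_im, depth_im, ir_im = live_sensor.frames()
    color_im.save(os.path.join(out_dir, 'color_%d.png' % i))   # save() is assumed
    depth_im.save(os.path.join(out_dir, 'depth_%d.npy' % i))   # save() is assumed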
|
sensor_type = sensor_type.lower()
if sensor_type == 'real':
s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
device_num=cfg['device_num'],
frame=cfg['frame'])
elif sensor_type == 'virtual':
s = VirtualKinect2Sensor(cfg['image_dir'],
frame=cfg['frame'])
elif sensor_type == 'bridged':
s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame'])
else:
raise ValueError('Kinect2 sensor type %s not supported' %(sensor_type))
return s
|
def sensor(sensor_type, cfg)
|
Creates a Kinect2 sensor of the specified type.
Parameters
----------
sensor_type : :obj:`str`
the type of the sensor ('real', 'virtual', or 'bridged')
cfg : :obj:`YamlConfig`
dictionary of parameters for sensor initialization
| 3.806338
| 3.756784
| 1.013191
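A minimal sketch of calling the factory above with a plain dict standing in for the YamlConfig; only the keys read by the chosen branch are needed, and the path and frame name are placeholders.
# sketch: the 'virtual' branch only reads 'image_dir' and 'frame'
cfg = {'image_dir': '/tmp/kinect_frames',    # placeholder path
       'frame': 'kinect2_0'}                 # placeholder frame name
virtual = sensor('virtual', cfg)
virtual.start()      # assumed to follow the same start()/frames() lifecycle as the real sensor
color_im, depth_im, ir_im = virtual.frames()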
|
# Get all devices attached as USB serial
all_devices = glob.glob('/dev/ttyUSB*')
# Identify which of the devices are LoadStar Serial Sensors
sensors = []
for device in all_devices:
try:
ser = serial.Serial(port=device,
timeout=0.5,
exclusive=True)
ser.write('ID\r')
ser.flush()
time.sleep(0.05)
resp = ser.read(13)
ser.close()
if len(resp) >= 10 and resp[:len(id_mask)] == id_mask:
sensors.append((device, resp.rstrip('\r\n')))
except Exception:
continue
sensors = sorted(sensors, key=lambda x : x[1])
# Connect to each of the serial devices
serials = []
for device, key in sensors:
ser = serial.Serial(port=device, timeout=0.5)
serials.append(ser)
rospy.loginfo('Connected to load cell {} at {}'.format(key, device))
return serials
|
def _connect(self, id_mask)
|
Connects to all of the load cells attached as USB serial devices.
Parameters
----------
id_mask : :obj:`str`
Prefix of the ID response used to identify LoadStar serial sensors.
Returns
-------
:obj:`list` of :obj:`serial.Serial`
Open serial connections to each identified load cell, sorted by ID.
| 3.585518
| 3.386987
| 1.058616
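A hedged sketch of the single-port handshake the loop above performs, probing one device in isolation; the port name and ID prefix are placeholders rather than values taken from the code.
import time
import serial

# probe a single port with the same 'ID' handshake used above;
# '/dev/ttyUSB0' and the 'LC' prefix are placeholders
ser = serial.Serial(port='/dev/ttyUSB0', timeout=0.5)
ser.write('ID\r')            # py2-style str write, matching the code above
ser.flush()
time.sleep(0.05)
resp = ser.read(13)
ser.close()
if resp.startswith('LC'):
    print('found load cell %s' % resp.rstrip('\r\n'))
else:
    print('no load cell at this port')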
|
for ser in self._serials:
ser.flush()
ser.flushInput()
ser.flushOutput()
time.sleep(0.02)
|
def _flush(self)
|
Flushes all of the serial ports.
| 3.844215
| 3.24912
| 1.183156
|