| source_code (string, 45–7.24k chars) | function_name (string, 1–111 chars) | messages (list) |
|---|---|---|
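Each row pairs a Python function body (with its name obscured as `x`) with the original function name and a chat-format `messages` list: a system instruction, a user turn carrying the source code, and an assistant turn carrying the suggested name. A minimal sketch, assuming the rows are exported as a JSON Lines file under the hypothetical path `rows.jsonl`, of how rows with this schema could be read:

```python
# Sketch only, not part of the dataset: iterate rows with the schema above.
# The path "rows.jsonl" is hypothetical; each line is assumed to hold one row
# with "source_code", "function_name", and "messages" keys.
import json

with open("rows.jsonl", encoding="utf-8") as fp:
    for line in fp:
        row = json.loads(line)
        # "messages" is a list of {"role", "content"} dicts: system prompt,
        # user turn with the source code, assistant turn with the name.
        user_code = next(m["content"] for m in row["messages"] if m["role"] == "user")
        suggested_name = next(m["content"] for m in row["messages"] if m["role"] == "assistant")
        assert suggested_name == row["function_name"]
```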
def x(block):
marker = None
value = []
for line in block.split('\n'):
line = line.strip()
if line.startswith('\\_'):
continue
match = MARKER_PATTERN.match(line)
if match:
if marker:
yield marker, '\n'.join(value)
marker = match.group('marker')
value = [line[match.end():]]
else:
value.append(line)
if marker:
yield marker, '\n'.join(value).strip()
|
marker_split
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(block):\n marker = None\n value = []\n for line in block.split('\\n'):\n line = line.strip()\n if line.startswith('\\\\_'):\n continue\n match = MARKER_PATTERN.match(line)\n if match:\n if marker:\n yield marker, '\\n'.join(value)\n marker = match.group('marker')\n value = [line[match.end():]]\n else:\n value.append(line)\n if marker:\n yield marker, '\\n'.join(value).strip()\n",
"role": "user"
},
{
"content": "marker_split",
"role": "assistant"
}
] |
def x(self, key, default=None):
for k, v in self:
if k == key:
return v
return default
|
get
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, key, default=None):\n for k, v in self:\n if k == key:\n return v\n return default\n",
"role": "user"
},
{
"content": "get",
"role": "assistant"
}
] |
def x(self, filename, encoding='utf-8', marker_map=None, entry_impl=Entry,
entry_sep='\n\n', entry_prefix=None, keep_empty=False):
marker_map = marker_map or {}
for entry in parse(filename, encoding, entry_sep, entry_prefix or
entry_sep, keep_empty=keep_empty):
if entry:
self.append(entry_impl([(marker_map.get(k, k), v) for k, v in
entry]))
|
read
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, filename, encoding='utf-8', marker_map=None, entry_impl=Entry,\n entry_sep='\\n\\n', entry_prefix=None, keep_empty=False):\n marker_map = marker_map or {}\n for entry in parse(filename, encoding, entry_sep, entry_prefix or\n entry_sep, keep_empty=keep_empty):\n if entry:\n self.append(entry_impl([(marker_map.get(k, k), v) for k, v in\n entry]))\n",
"role": "user"
},
{
"content": "read",
"role": "assistant"
}
] |
def x(self, filename, encoding='utf-8'):
with io.open(str(filename), 'w', encoding=encoding) as fp:
for entry in self:
fp.write(entry.__unicode__())
fp.write('\n\n')
|
write
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, filename, encoding='utf-8'):\n with io.open(str(filename), 'w', encoding=encoding) as fp:\n for entry in self:\n fp.write(entry.__unicode__())\n fp.write('\\n\\n')\n",
"role": "user"
},
{
"content": "write",
"role": "assistant"
}
] |
def x(question, default=True):
valid = {'': default, 'yes': True, 'y': True, 'no': False, 'n': False}
while 1:
choice = input(question + (' [Y/n] ' if default else ' [y/N] ')).lower(
)
if choice in valid:
return valid[choice]
print("Please respond with 'y' or 'n' ")
|
confirm
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(question, default=True):\n valid = {'': default, 'yes': True, 'y': True, 'no': False, 'n': False}\n while 1:\n choice = input(question + (' [Y/n] ' if default else ' [y/N] ')).lower(\n )\n if choice in valid:\n return valid[choice]\n print(\"Please respond with 'y' or 'n' \")\n",
"role": "user"
},
{
"content": "confirm",
"role": "assistant"
}
] |
def x(self, section, option, whitespace_preserving_prefix='.'):
lines = []
for line in self.get(section, option, fallback='').splitlines():
if re.match(re.escape(whitespace_preserving_prefix) + '\\s+', line):
line = line[len(whitespace_preserving_prefix):]
lines.append(line)
return '\n'.join(lines)
|
gettext
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, section, option, whitespace_preserving_prefix='.'):\n lines = []\n for line in self.get(section, option, fallback='').splitlines():\n if re.match(re.escape(whitespace_preserving_prefix) + '\\\\s+', line):\n line = line[len(whitespace_preserving_prefix):]\n lines.append(line)\n return '\\n'.join(lines)\n",
"role": "user"
},
{
"content": "gettext",
"role": "assistant"
}
] |
def x(content, mimetype=None):
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
elif isinstance(content, text_type):
content = content.encode('utf8')
return 'data:{0};base64,{1}'.format(mimetype or
'application/octet-stream', b64encode(content).decode())
|
data_url
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(content, mimetype=None):\n if isinstance(content, pathlib.Path):\n if not mimetype:\n mimetype = guess_type(content.name)[0]\n with content.open('rb') as fp:\n content = fp.read()\n elif isinstance(content, text_type):\n content = content.encode('utf8')\n return 'data:{0};base64,{1}'.format(mimetype or\n 'application/octet-stream', b64encode(content).decode())\n",
"role": "user"
},
{
"content": "data_url",
"role": "assistant"
}
] |
def x(s, encoding='utf8'):
if PY3:
return s if isinstance(s, binary_type) else binary_type(s, encoding
=encoding)
return binary_type(s)
|
to_binary
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(s, encoding='utf8'):\n if PY3:\n return s if isinstance(s, binary_type) else binary_type(s, encoding\n =encoding)\n return binary_type(s)\n",
"role": "user"
},
{
"content": "to_binary",
"role": "assistant"
}
] |
def x(d, _filter=None, **kw):
def x(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
|
dict_merged
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(d, _filter=None, **kw):\n\n def x(s):\n if _filter:\n return _filter(s)\n return s is not None\n d = d or {}\n for k, v in iteritems(kw):\n if f(v):\n d[k] = v\n return d\n",
"role": "user"
},
{
"content": "dict_merged",
"role": "assistant"
}
] |
def x(text):
invalid = list(range(9))
invalid.extend([11, 12])
invalid.extend(range(14, 32))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
|
xmlchars
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text):\n invalid = list(range(9))\n invalid.extend([11, 12])\n invalid.extend(range(14, 32))\n return re.sub('|'.join('\\\\x%0.2X' % i for i in invalid), '', text)\n",
"role": "user"
},
{
"content": "xmlchars",
"role": "assistant"
}
] |
def x(s, remove_whitespace=True, lowercase=True):
res = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata
.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
|
slug
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(s, remove_whitespace=True, lowercase=True):\n res = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata\n .category(c) != 'Mn')\n if lowercase:\n res = res.lower()\n for c in string.punctuation:\n res = res.replace(c, '')\n res = re.sub('\\\\s+', '' if remove_whitespace else ' ', res)\n res = res.encode('ascii', 'ignore').decode('ascii')\n assert re.match('[ A-Za-z0-9]*$', res)\n return res\n",
"role": "user"
},
{
"content": "slug",
"role": "assistant"
}
] |
def x(string, encoding='utf-8'):
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
string.decode(encoding)
return string
except UnicodeDecodeError:
return string.decode('latin1').encode(encoding)
|
encoded
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(string, encoding='utf-8'):\n assert isinstance(string, string_types) or isinstance(string, binary_type)\n if isinstance(string, text_type):\n return string.encode(encoding)\n try:\n string.decode(encoding)\n return string\n except UnicodeDecodeError:\n return string.decode('latin1').encode(encoding)\n",
"role": "user"
},
{
"content": "encoded",
"role": "assistant"
}
] |
def x(p, encoding=None, strip=False, comment=None, normalize=None,
linenumbers=False):
if comment:
strip = True
if isinstance(p, (list, tuple)):
res = [(l.decode(encoding) if encoding else l) for l in p]
else:
with Path(p).open(encoding=encoding or 'utf-8') as fp:
res = fp.readlines()
if strip:
res = [(l.strip() or None) for l in res]
if comment:
res = [(None if l and l.startswith(comment) else l) for l in res]
if normalize:
res = [(unicodedata.normalize(normalize, l) if l else l) for l in res]
if linenumbers:
return [(n, l) for n, l in enumerate(res, 1)]
return [l for l in res if l is not None]
|
readlines
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(p, encoding=None, strip=False, comment=None, normalize=None,\n linenumbers=False):\n if comment:\n strip = True\n if isinstance(p, (list, tuple)):\n res = [(l.decode(encoding) if encoding else l) for l in p]\n else:\n with Path(p).open(encoding=encoding or 'utf-8') as fp:\n res = fp.readlines()\n if strip:\n res = [(l.strip() or None) for l in res]\n if comment:\n res = [(None if l and l.startswith(comment) else l) for l in res]\n if normalize:\n res = [(unicodedata.normalize(normalize, l) if l else l) for l in res]\n if linenumbers:\n return [(n, l) for n, l in enumerate(res, 1)]\n return [l for l in res if l is not None]\n",
"role": "user"
},
{
"content": "readlines",
"role": "assistant"
}
] |
def x(p, mode='all', **kw):
for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
if mode in ('all', 'dirs'):
for dirname in dirnames:
yield Path(dirpath).joinpath(dirname)
if mode in ('all', 'files'):
for fname in filenames:
yield Path(dirpath).joinpath(fname)
|
walk
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(p, mode='all', **kw):\n for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):\n if mode in ('all', 'dirs'):\n for dirname in dirnames:\n yield Path(dirpath).joinpath(dirname)\n if mode in ('all', 'files'):\n for fname in filenames:\n yield Path(dirpath).joinpath(fname)\n",
"role": "user"
},
{
"content": "walk",
"role": "assistant"
}
] |
def x(self):
m = max(itertools.chain(map(len, self), [0]))
fields = (' %s = {%s}' % (k.ljust(m), self[k]) for k in self)
return '@%s{%s,\n%s\n}' % (getattr(self.genre, 'value', self.genre),
self.id, ',\n'.join(fields))
|
bibtex
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n m = max(itertools.chain(map(len, self), [0]))\n fields = (' %s = {%s}' % (k.ljust(m), self[k]) for k in self)\n return '@%s{%s,\\n%s\\n}' % (getattr(self.genre, 'value', self.genre),\n self.id, ',\\n'.join(fields))\n",
"role": "user"
},
{
"content": "bibtex",
"role": "assistant"
}
] |
def x(self):
genre = getattr(self.genre, 'value', self.genre)
pages_at_end = genre in ('book', 'phdthesis', 'mastersthesis', 'misc',
'techreport')
thesis = genre in ('phdthesis', 'mastersthesis')
if self.get('editor'):
editors = self['editor']
affix = 'eds' if ' and ' in editors or '&' in editors else 'ed'
editors = ' %s (%s.)' % (editors, affix)
else:
editors = None
res = [self.get('author', editors), self.get('year', 'n.d')]
if genre == 'book':
res.append(self.get_with_translation('booktitle') or self.
get_with_translation('title'))
series = ', '.join(filter(None, [self.get('series'), self.get(
'volume')]))
if series:
res.append('(%s.)' % series)
elif genre == 'misc':
res.append(self.get_with_translation('title') or self.get('note'))
else:
res.append(self.get_with_translation('title'))
if genre == 'article':
atom = ' '.join(filter(None, [self.get('journal'), self.get('volume')])
)
if self.get('issue'):
atom += '(%s)' % self['issue']
res.append(atom)
res.append(self.get('pages'))
elif genre == 'incollection' or genre == 'inproceedings':
prefix = 'In'
atom = ''
if editors:
atom += editors
if self.get('booktitle'):
if atom:
atom += ','
atom += ' %s' % self.get_with_translation('booktitle')
if self.get('pages'):
atom += ', %s' % self['pages']
res.append(prefix + atom)
else:
if editors and self.get('author'):
res.append('In %s' % editors)
for attr in ['journal', 'volume' if genre != 'book' else None]:
if attr and self.get(attr):
res.append(self.get(attr))
if self.get('issue'):
res.append('(%s)' % self['issue'])
if not pages_at_end and self.get('pages'):
res.append(self['pages'])
if self.get('publisher'):
res.append(': '.join(filter(None, [self.get('address'), self[
'publisher']])))
elif genre == 'misc' and self.get('howpublished'):
res.append(self.get('howpublished'))
if not thesis and pages_at_end and self.get('pages'):
res.append(self['pages'] + 'pp')
note = self.get('note') or self._genre_note.get(genre)
if note and note not in res:
if thesis:
joiner = ','
if self.get('school'):
note += '{0} {1}'.format(joiner, self.get('school'))
joiner = ';'
if self.get('pages'):
note += '{0} {1}pp.'.format(joiner, self.get('pages'))
res.append('(%s)' % note)
return ' '.join(x if x.endswith(('.', '.)')) else '%s.' % x for x in
res if x)
|
text
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n genre = getattr(self.genre, 'value', self.genre)\n pages_at_end = genre in ('book', 'phdthesis', 'mastersthesis', 'misc',\n 'techreport')\n thesis = genre in ('phdthesis', 'mastersthesis')\n if self.get('editor'):\n editors = self['editor']\n affix = 'eds' if ' and ' in editors or '&' in editors else 'ed'\n editors = ' %s (%s.)' % (editors, affix)\n else:\n editors = None\n res = [self.get('author', editors), self.get('year', 'n.d')]\n if genre == 'book':\n res.append(self.get_with_translation('booktitle') or self.\n get_with_translation('title'))\n series = ', '.join(filter(None, [self.get('series'), self.get(\n 'volume')]))\n if series:\n res.append('(%s.)' % series)\n elif genre == 'misc':\n res.append(self.get_with_translation('title') or self.get('note'))\n else:\n res.append(self.get_with_translation('title'))\n if genre == 'article':\n atom = ' '.join(filter(None, [self.get('journal'), self.get('volume')])\n )\n if self.get('issue'):\n atom += '(%s)' % self['issue']\n res.append(atom)\n res.append(self.get('pages'))\n elif genre == 'incollection' or genre == 'inproceedings':\n prefix = 'In'\n atom = ''\n if editors:\n atom += editors\n if self.get('booktitle'):\n if atom:\n atom += ','\n atom += ' %s' % self.get_with_translation('booktitle')\n if self.get('pages'):\n atom += ', %s' % self['pages']\n res.append(prefix + atom)\n else:\n if editors and self.get('author'):\n res.append('In %s' % editors)\n for attr in ['journal', 'volume' if genre != 'book' else None]:\n if attr and self.get(attr):\n res.append(self.get(attr))\n if self.get('issue'):\n res.append('(%s)' % self['issue'])\n if not pages_at_end and self.get('pages'):\n res.append(self['pages'])\n if self.get('publisher'):\n res.append(': '.join(filter(None, [self.get('address'), self[\n 'publisher']])))\n elif genre == 'misc' and self.get('howpublished'):\n res.append(self.get('howpublished'))\n if not thesis and pages_at_end and self.get('pages'):\n res.append(self['pages'] + 'pp')\n note = self.get('note') or self._genre_note.get(genre)\n if note and note not in res:\n if thesis:\n joiner = ','\n if self.get('school'):\n note += '{0} {1}'.format(joiner, self.get('school'))\n joiner = ';'\n if self.get('pages'):\n note += '{0} {1}pp.'.format(joiner, self.get('pages'))\n res.append('(%s)' % note)\n return ' '.join(x if x.endswith(('.', '.)')) else '%s.' % x for x in\n res if x)\n",
"role": "user"
},
{
"content": "text",
"role": "assistant"
}
] |
def x(self, path, method='GET', params=None, type=REST_TYPE):
response_text = self._get_http_client(type).request(path, method, params)
if not response_text:
return response_text
response_json = json.loads(response_text)
if 'errors' in response_json:
raise ErrorException([Error().load(e) for e in response_json['errors']]
)
return response_json
|
request
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, path, method='GET', params=None, type=REST_TYPE):\n response_text = self._get_http_client(type).request(path, method, params)\n if not response_text:\n return response_text\n response_json = json.loads(response_text)\n if 'errors' in response_json:\n raise ErrorException([Error().load(e) for e in response_json['errors']]\n )\n return response_json\n",
"role": "user"
},
{
"content": "request",
"role": "assistant"
}
] |
def x(self, msisdn, reference):
return HLR().load(self.request('hlr', 'POST', {'msisdn': msisdn,
'reference': reference}))
|
hlr_create
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, msisdn, reference):\n return HLR().load(self.request('hlr', 'POST', {'msisdn': msisdn,\n 'reference': reference}))\n",
"role": "user"
},
{
"content": "hlr_create",
"role": "assistant"
}
] |
def x(self, originator, recipients, body, params=None):
if params is None:
params = {}
if type(recipients) == list:
recipients = ','.join(recipients)
params.update({'originator': originator, 'body': body, 'recipients':
recipients})
return Message().load(self.request('messages', 'POST', params))
|
message_create
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, originator, recipients, body, params=None):\n if params is None:\n params = {}\n if type(recipients) == list:\n recipients = ','.join(recipients)\n params.update({'originator': originator, 'body': body, 'recipients':\n recipients})\n return Message().load(self.request('messages', 'POST', params))\n",
"role": "user"
},
{
"content": "message_create",
"role": "assistant"
}
] |
def x(self, recipients, body, params=None):
if params is None:
params = {}
if type(recipients) == list:
recipients = ','.join(recipients)
params.update({'recipients': recipients, 'body': body})
return VoiceMessage().load(self.request('voicemessages', 'POST', params))
|
voice_message_create
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, recipients, body, params=None):\n if params is None:\n params = {}\n if type(recipients) == list:\n recipients = ','.join(recipients)\n params.update({'recipients': recipients, 'body': body})\n return VoiceMessage().load(self.request('voicemessages', 'POST', params))\n",
"role": "user"
},
{
"content": "voice_message_create",
"role": "assistant"
}
] |
def x(self, phonenumber, params=None):
if params is None:
params = {}
return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET',
params))
|
lookup
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, phonenumber, params=None):\n if params is None:\n params = {}\n return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET',\n params))\n",
"role": "user"
},
{
"content": "lookup",
"role": "assistant"
}
] |
def x(self, phonenumber, params=None):
if params is None:
params = {}
return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr',
'GET', params))
|
lookup_hlr
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, phonenumber, params=None):\n if params is None:\n params = {}\n return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr',\n 'GET', params))\n",
"role": "user"
},
{
"content": "lookup_hlr",
"role": "assistant"
}
] |
def x(self, phonenumber, params=None):
if params is None:
params = {}
return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr',
'POST', params))
|
lookup_hlr_create
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, phonenumber, params=None):\n if params is None:\n params = {}\n return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr',\n 'POST', params))\n",
"role": "user"
},
{
"content": "lookup_hlr_create",
"role": "assistant"
}
] |
def x(self, recipient, params=None):
if params is None:
params = {}
params.update({'recipient': recipient})
return Verify().load(self.request('verify', 'POST', params))
|
verify_create
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, recipient, params=None):\n if params is None:\n params = {}\n params.update({'recipient': recipient})\n return Verify().load(self.request('verify', 'POST', params))\n",
"role": "user"
},
{
"content": "verify_create",
"role": "assistant"
}
] |
def x(self, id, token):
return Verify().load(self.request('verify/' + str(id), params={'token':
token}))
|
verify_verify
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, id, token):\n return Verify().load(self.request('verify/' + str(id), params={'token':\n token}))\n",
"role": "user"
},
{
"content": "verify_verify",
"role": "assistant"
}
] |
def x(self, value):
items = []
for item in value:
items.append(self.itemType().load(item))
self._items = items
|
items
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, value):\n items = []\n for item in value:\n items.append(self.itemType().load(item))\n self._items = items\n",
"role": "user"
},
{
"content": "items",
"role": "assistant"
}
] |
def x(self, path, method='GET', params=None):
if params is None:
params = {}
url = urljoin(self.endpoint, path)
headers = {'Accept': 'application/json', 'Authorization': 'AccessKey ' +
self.access_key, 'User-Agent': self.user_agent, 'Content-Type':
'application/json'}
if method == 'DELETE':
response = requests.delete(url, verify=True, headers=headers, data=
json.dumps(params))
elif method == 'GET':
response = requests.get(url, verify=True, headers=headers, params=
params)
elif method == 'PATCH':
response = requests.patch(url, verify=True, headers=headers, data=
json.dumps(params))
elif method == 'POST':
response = requests.post(url, verify=True, headers=headers, data=
json.dumps(params))
elif method == 'PUT':
response = requests.put(url, verify=True, headers=headers, data=
json.dumps(params))
else:
raise ValueError(str(method) + ' is not a supported HTTP method')
if response.status_code in self.__supported_status_codes:
response_text = response.text
else:
response.raise_for_status()
return response_text
|
request
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, path, method='GET', params=None):\n if params is None:\n params = {}\n url = urljoin(self.endpoint, path)\n headers = {'Accept': 'application/json', 'Authorization': 'AccessKey ' +\n self.access_key, 'User-Agent': self.user_agent, 'Content-Type':\n 'application/json'}\n if method == 'DELETE':\n response = requests.delete(url, verify=True, headers=headers, data=\n json.dumps(params))\n elif method == 'GET':\n response = requests.get(url, verify=True, headers=headers, params=\n params)\n elif method == 'PATCH':\n response = requests.patch(url, verify=True, headers=headers, data=\n json.dumps(params))\n elif method == 'POST':\n response = requests.post(url, verify=True, headers=headers, data=\n json.dumps(params))\n elif method == 'PUT':\n response = requests.put(url, verify=True, headers=headers, data=\n json.dumps(params))\n else:\n raise ValueError(str(method) + ' is not a supported HTTP method')\n if response.status_code in self.__supported_status_codes:\n response_text = response.text\n else:\n response.raise_for_status()\n return response_text\n",
"role": "user"
},
{
"content": "request",
"role": "assistant"
}
] |
def x(self):
config_h = opj('src', 'cysignals', 'cysignals_config.h')
if not os.path.isfile(config_h):
import subprocess
subprocess.check_call(['make', 'configure'])
subprocess.check_call(['sh', 'configure'])
dist = self.distribution
ext_modules = dist.ext_modules
if ext_modules:
dist.ext_modules[:] = self.cythonize(ext_modules)
_build.run(self)
|
run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n config_h = opj('src', 'cysignals', 'cysignals_config.h')\n if not os.path.isfile(config_h):\n import subprocess\n subprocess.check_call(['make', 'configure'])\n subprocess.check_call(['sh', 'configure'])\n dist = self.distribution\n ext_modules = dist.ext_modules\n if ext_modules:\n dist.ext_modules[:] = self.cythonize(ext_modules)\n _build.run(self)\n",
"role": "user"
},
{
"content": "run",
"role": "assistant"
}
] |
def x():
files = []
for path in sys_path:
pattern = os.path.join(path, '*', 'cython_debug', 'cython_debug_info_*'
)
files.extend(glob.glob(pattern))
return files
|
cython_debug_files
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n files = []\n for path in sys_path:\n pattern = os.path.join(path, '*', 'cython_debug', 'cython_debug_info_*'\n )\n files.extend(glob.glob(pattern))\n return files\n",
"role": "user"
},
{
"content": "cython_debug_files",
"role": "assistant"
}
] |
def x(self):
running = self._webcam.start()
if not running:
return running
running &= self._phoxi.start()
if not running:
self._webcam.stop()
return running
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n running = self._webcam.start()\n if not running:\n return running\n running &= self._phoxi.start()\n if not running:\n self._webcam.stop()\n return running\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running:
logging.warning('Colorized PhoXi not running. Aborting stop')
return False
self._webcam.stop()
self._phoxi.stop()
return True
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n logging.warning('Colorized PhoXi not running. Aborting stop')\n return False\n self._webcam.stop()\n self._phoxi.stop()\n return True\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self):
_, phoxi_depth_im, _ = self._phoxi.frames()
webcam_color_im, _, _ = self._webcam.frames(most_recent=True)
phoxi_color_im = self._colorize(phoxi_depth_im, webcam_color_im)
return phoxi_color_im, phoxi_depth_im, None
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n _, phoxi_depth_im, _ = self._phoxi.frames()\n webcam_color_im, _, _ = self._webcam.frames(most_recent=True)\n phoxi_color_im = self._colorize(phoxi_depth_im, webcam_color_im)\n return phoxi_color_im, phoxi_depth_im, None\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self, num_img=1, fill_depth=0.0):
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
median_depth = Image.median_images(depths)
median_depth.data[median_depth.data == 0.0] = fill_depth
return median_depth
|
median_depth_img
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_img=1, fill_depth=0.0):\n depths = []\n for _ in range(num_img):\n _, depth, _ = self.frames()\n depths.append(depth)\n median_depth = Image.median_images(depths)\n median_depth.data[median_depth.data == 0.0] = fill_depth\n return median_depth\n",
"role": "user"
},
{
"content": "median_depth_img",
"role": "assistant"
}
] |
def x(self, depth_im, color_im):
target_shape = depth_im.data.shape[0], depth_im.data.shape[1], 3
pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(
pc_depth)
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
icds = self._webcam.color_intrinsics.project(pc_color).data.T
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0,
return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 0.005
valid_mask = np.logical_and(np.logical_and(icds[:, 0] >= 0, icds[:, 0] <
self._webcam.color_intrinsics.width), np.logical_and(icds[:, 1] >=
0, icds[:, 1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:, 1], valid_icds[:, 0], :]
color_im_data = np.zeros((target_shape[0] * target_shape[1],
target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame)
|
colorize
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, depth_im, color_im):\n target_shape = depth_im.data.shape[0], depth_im.data.shape[1], 3\n pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)\n pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(\n pc_depth)\n pc_data = pc_color.data.T\n dists = np.linalg.norm(pc_data, axis=1)\n order = np.argsort(dists)\n pc_data = pc_data[order]\n pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)\n sorted_dists = dists[order]\n sorted_depths = depth_im.data.flatten()[order]\n icds = self._webcam.color_intrinsics.project(pc_color).data.T\n rounded_icds = np.array(icds / 3.0, dtype=np.uint32)\n unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0,\n return_index=True, return_inverse=True)\n icd_depths = sorted_dists[unique_inds]\n min_depths_pp = icd_depths[unique_inv]\n depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 0.005\n valid_mask = np.logical_and(np.logical_and(icds[:, 0] >= 0, icds[:, 0] <\n self._webcam.color_intrinsics.width), np.logical_and(icds[:, 1] >= \n 0, icds[:, 1] < self._webcam.color_intrinsics.height))\n valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)\n valid_mask = np.logical_and(valid_mask, depth_delta_mask)\n valid_icds = icds[valid_mask]\n colors = color_im.data[valid_icds[:, 1], valid_icds[:, 0], :]\n color_im_data = np.zeros((target_shape[0] * target_shape[1],\n target_shape[2]), dtype=np.uint8)\n color_im_data[valid_mask] = colors\n color_im_data[order] = color_im_data.copy()\n color_im_data = color_im_data.reshape(target_shape)\n return ColorImage(color_im_data, frame=self._frame)\n",
"role": "user"
},
{
"content": "colorize",
"role": "assistant"
}
] |
def x(sensor_type, cfg):
sensor_type = sensor_type.lower()
if sensor_type == 'kinect2':
s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
device_num=cfg['device_num'], frame=cfg['frame'])
elif sensor_type == 'bridged_kinect2':
s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame'])
elif sensor_type == 'primesense':
flip_images = True
if 'flip_images' in cfg.keys():
flip_images = cfg['flip_images']
s = PrimesenseSensor(auto_white_balance=cfg['auto_white_balance'],
flip_images=flip_images, frame=cfg['frame'])
elif sensor_type == 'virtual':
s = VirtualSensor(cfg['image_dir'], frame=cfg['frame'])
elif sensor_type == 'tensor_dataset':
s = TensorDatasetVirtualSensor(cfg['dataset_dir'], frame=cfg['frame'])
elif sensor_type == 'primesense_ros':
s = PrimesenseSensor_ROS(frame=cfg['frame'])
elif sensor_type == 'ensenso':
s = EnsensoSensor(frame=cfg['frame'])
elif sensor_type == 'phoxi':
s = PhoXiSensor(frame=cfg['frame'], device_name=cfg['device_name'],
size=cfg['size'])
elif sensor_type == 'webcam':
s = WebcamSensor(frame=cfg['frame'], device_id=cfg['device_id'])
elif sensor_type == 'colorized_phoxi':
s = ColorizedPhoXiSensor(frame=cfg['frame'], phoxi_config=cfg[
'phoxi_config'], webcam_config=cfg['webcam_config'], calib_dir=
cfg['calib_dir'])
elif sensor_type == 'realsense':
s = RealSenseSensor(cam_id=cfg['cam_id'], filter_depth=cfg[
'filter_depth'], frame=cfg['frame'])
else:
raise ValueError('RGBD sensor type %s not supported' % sensor_type)
return s
|
sensor
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(sensor_type, cfg):\n sensor_type = sensor_type.lower()\n if sensor_type == 'kinect2':\n s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],\n device_num=cfg['device_num'], frame=cfg['frame'])\n elif sensor_type == 'bridged_kinect2':\n s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame'])\n elif sensor_type == 'primesense':\n flip_images = True\n if 'flip_images' in cfg.keys():\n flip_images = cfg['flip_images']\n s = PrimesenseSensor(auto_white_balance=cfg['auto_white_balance'],\n flip_images=flip_images, frame=cfg['frame'])\n elif sensor_type == 'virtual':\n s = VirtualSensor(cfg['image_dir'], frame=cfg['frame'])\n elif sensor_type == 'tensor_dataset':\n s = TensorDatasetVirtualSensor(cfg['dataset_dir'], frame=cfg['frame'])\n elif sensor_type == 'primesense_ros':\n s = PrimesenseSensor_ROS(frame=cfg['frame'])\n elif sensor_type == 'ensenso':\n s = EnsensoSensor(frame=cfg['frame'])\n elif sensor_type == 'phoxi':\n s = PhoXiSensor(frame=cfg['frame'], device_name=cfg['device_name'],\n size=cfg['size'])\n elif sensor_type == 'webcam':\n s = WebcamSensor(frame=cfg['frame'], device_id=cfg['device_id'])\n elif sensor_type == 'colorized_phoxi':\n s = ColorizedPhoXiSensor(frame=cfg['frame'], phoxi_config=cfg[\n 'phoxi_config'], webcam_config=cfg['webcam_config'], calib_dir=\n cfg['calib_dir'])\n elif sensor_type == 'realsense':\n s = RealSenseSensor(cam_id=cfg['cam_id'], filter_depth=cfg[\n 'filter_depth'], frame=cfg['frame'])\n else:\n raise ValueError('RGBD sensor type %s not supported' % sensor_type)\n return s\n",
"role": "user"
},
{
"content": "sensor",
"role": "assistant"
}
] |
def x(point, all_points, eps=0.0001):
inds = np.where(np.linalg.norm(point - all_points, axis=1) < eps)
if inds[0].shape[0] == 0:
return -1
return inds[0][0]
|
get_point_index
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(point, all_points, eps=0.0001):\n inds = np.where(np.linalg.norm(point - all_points, axis=1) < eps)\n if inds[0].shape[0] == 0:\n return -1\n return inds[0][0]\n",
"role": "user"
},
{
"content": "get_point_index",
"role": "assistant"
}
] |
def x(self, source_obj_features, target_obj_features):
if not isinstance(source_obj_features, f.BagOfFeatures):
raise ValueError('Must supply source bag of object features')
if not isinstance(target_obj_features, f.BagOfFeatures):
raise ValueError('Must supply target bag of object features')
source_descriptors = source_obj_features.descriptors
target_descriptors = target_obj_features.descriptors
source_keypoints = source_obj_features.keypoints
target_keypoints = target_obj_features.keypoints
dists = spatial.distance.cdist(source_descriptors, target_descriptors)
source_closest_descriptors = dists.argmin(axis=1)
target_closest_descriptors = dists.argmin(axis=0)
match_indices = []
source_matched_points = np.zeros((0, 3))
target_matched_points = np.zeros((0, 3))
for i, j in enumerate(source_closest_descriptors):
if target_closest_descriptors[j] == i:
match_indices.append(j)
source_matched_points = np.r_[source_matched_points,
source_keypoints[i:i + 1, :]]
target_matched_points = np.r_[target_matched_points,
target_keypoints[j:j + 1, :]]
else:
match_indices.append(-1)
return Correspondences(match_indices, source_matched_points,
target_matched_points)
|
match
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, source_obj_features, target_obj_features):\n if not isinstance(source_obj_features, f.BagOfFeatures):\n raise ValueError('Must supply source bag of object features')\n if not isinstance(target_obj_features, f.BagOfFeatures):\n raise ValueError('Must supply target bag of object features')\n source_descriptors = source_obj_features.descriptors\n target_descriptors = target_obj_features.descriptors\n source_keypoints = source_obj_features.keypoints\n target_keypoints = target_obj_features.keypoints\n dists = spatial.distance.cdist(source_descriptors, target_descriptors)\n source_closest_descriptors = dists.argmin(axis=1)\n target_closest_descriptors = dists.argmin(axis=0)\n match_indices = []\n source_matched_points = np.zeros((0, 3))\n target_matched_points = np.zeros((0, 3))\n for i, j in enumerate(source_closest_descriptors):\n if target_closest_descriptors[j] == i:\n match_indices.append(j)\n source_matched_points = np.r_[source_matched_points,\n source_keypoints[i:i + 1, :]]\n target_matched_points = np.r_[target_matched_points,\n target_keypoints[j:j + 1, :]]\n else:\n match_indices.append(-1)\n return Correspondences(match_indices, source_matched_points,\n target_matched_points)\n",
"role": "user"
},
{
"content": "match",
"role": "assistant"
}
] |
def x(self, source_points, target_points, source_normals, target_normals):
dists = ssd.cdist(source_points, target_points, 'euclidean')
ip = source_normals.dot(target_normals.T)
source_ip = source_points.dot(target_normals.T)
target_ip = target_points.dot(target_normals.T)
target_ip = np.diag(target_ip)
target_ip = np.tile(target_ip, [source_points.shape[0], 1])
abs_diff = np.abs(source_ip - target_ip)
invalid_dists = np.where(dists > self.dist_thresh_)
abs_diff[invalid_dists[0], invalid_dists[1]] = np.inf
invalid_norms = np.where(ip < self.norm_thresh_)
abs_diff[invalid_norms[0], invalid_norms[1]] = np.inf
match_indices = np.argmin(abs_diff, axis=1)
match_vals = np.min(abs_diff, axis=1)
invalid_matches = np.where(match_vals == np.inf)
match_indices[invalid_matches[0]] = -1
return NormalCorrespondences(match_indices, source_points,
target_points, source_normals, target_normals)
|
match
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, source_points, target_points, source_normals, target_normals):\n dists = ssd.cdist(source_points, target_points, 'euclidean')\n ip = source_normals.dot(target_normals.T)\n source_ip = source_points.dot(target_normals.T)\n target_ip = target_points.dot(target_normals.T)\n target_ip = np.diag(target_ip)\n target_ip = np.tile(target_ip, [source_points.shape[0], 1])\n abs_diff = np.abs(source_ip - target_ip)\n invalid_dists = np.where(dists > self.dist_thresh_)\n abs_diff[invalid_dists[0], invalid_dists[1]] = np.inf\n invalid_norms = np.where(ip < self.norm_thresh_)\n abs_diff[invalid_norms[0], invalid_norms[1]] = np.inf\n match_indices = np.argmin(abs_diff, axis=1)\n match_vals = np.min(abs_diff, axis=1)\n invalid_matches = np.where(match_vals == np.inf)\n match_indices[invalid_matches[0]] = -1\n return NormalCorrespondences(match_indices, source_points,\n target_points, source_normals, target_normals)\n",
"role": "user"
},
{
"content": "match",
"role": "assistant"
}
] |
def x(self):
self._cfg.enable_device(self.id)
self._cfg.enable_stream(rs.stream.color, RealSenseSensor.COLOR_IM_WIDTH,
RealSenseSensor.COLOR_IM_HEIGHT, rs.format.bgr8, RealSenseSensor.FPS)
self._cfg.enable_stream(rs.stream.depth, RealSenseSensor.DEPTH_IM_WIDTH,
360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT, rs.
format.z16, RealSenseSensor.FPS)
|
config_pipe
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self._cfg.enable_device(self.id)\n self._cfg.enable_stream(rs.stream.color, RealSenseSensor.COLOR_IM_WIDTH,\n RealSenseSensor.COLOR_IM_HEIGHT, rs.format.bgr8, RealSenseSensor.FPS)\n self._cfg.enable_stream(rs.stream.depth, RealSenseSensor.DEPTH_IM_WIDTH,\n 360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT, rs.\n format.z16, RealSenseSensor.FPS)\n",
"role": "user"
},
{
"content": "config_pipe",
"role": "assistant"
}
] |
def x(self):
sensor = self._profile.get_device().first_depth_sensor()
self._depth_scale = sensor.get_depth_scale()
|
set_depth_scale
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n sensor = self._profile.get_device().first_depth_sensor()\n self._depth_scale = sensor.get_depth_scale()\n",
"role": "user"
},
{
"content": "set_depth_scale",
"role": "assistant"
}
] |
def x(self):
strm = self._profile.get_stream(rs.stream.color)
obj = strm.as_video_stream_profile().get_intrinsics()
self._intrinsics[0, 0] = obj.fx
self._intrinsics[1, 1] = obj.fy
self._intrinsics[0, 2] = obj.ppx
self._intrinsics[1, 2] = obj.ppy
|
set_intrinsics
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n strm = self._profile.get_stream(rs.stream.color)\n obj = strm.as_video_stream_profile().get_intrinsics()\n self._intrinsics[0, 0] = obj.fx\n self._intrinsics[1, 1] = obj.fy\n self._intrinsics[0, 2] = obj.ppx\n self._intrinsics[1, 2] = obj.ppy\n",
"role": "user"
},
{
"content": "set_intrinsics",
"role": "assistant"
}
] |
def x(self):
return CameraIntrinsics(self._frame, self._intrinsics[0, 0], self.
_intrinsics[1, 1], self._intrinsics[0, 2], self._intrinsics[1, 2],
height=RealSenseSensor.COLOR_IM_HEIGHT, width=RealSenseSensor.
COLOR_IM_WIDTH)
|
color_intrinsics
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return CameraIntrinsics(self._frame, self._intrinsics[0, 0], self.\n _intrinsics[1, 1], self._intrinsics[0, 2], self._intrinsics[1, 2],\n height=RealSenseSensor.COLOR_IM_HEIGHT, width=RealSenseSensor.\n COLOR_IM_WIDTH)\n",
"role": "user"
},
{
"content": "color_intrinsics",
"role": "assistant"
}
] |
def x(self):
try:
self._depth_align = False
if self._registration_mode == RealSenseRegistrationMode.DEPTH_TO_COLOR:
self._depth_align = True
self._config_pipe()
self._profile = self._pipe.start(self._cfg)
self._set_depth_scale()
self._set_intrinsics()
for _ in range(5):
self._pipe.wait_for_frames()
self._running = True
except RuntimeError as e:
print(e)
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n try:\n self._depth_align = False\n if self._registration_mode == RealSenseRegistrationMode.DEPTH_TO_COLOR:\n self._depth_align = True\n self._config_pipe()\n self._profile = self._pipe.start(self._cfg)\n self._set_depth_scale()\n self._set_intrinsics()\n for _ in range(5):\n self._pipe.wait_for_frames()\n self._running = True\n except RuntimeError as e:\n print(e)\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running:
logging.warning('Realsense not running. Aborting stop.')
return False
self._pipe.stop()
self._running = False
return True
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n logging.warning('Realsense not running. Aborting stop.')\n return False\n self._pipe.stop()\n self._running = False\n return True\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self):
frames = self._pipe.wait_for_frames()
if self._depth_align:
frames = self._align.process(frames)
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
logging.warning('Could not retrieve frames.')
return None, None
if self._filter_depth:
depth_frame = self._filter_depth_frame(depth_frame)
depth_image = self._to_numpy(depth_frame, np.float32)
color_image = self._to_numpy(color_frame, np.uint8)
depth_image *= self._depth_scale
color_image = color_image[..., ::-1]
depth = DepthImage(depth_image, frame=self._frame)
color = ColorImage(color_image, frame=self._frame)
return color, depth
|
read_color_and_depth_image
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n frames = self._pipe.wait_for_frames()\n if self._depth_align:\n frames = self._align.process(frames)\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n logging.warning('Could not retrieve frames.')\n return None, None\n if self._filter_depth:\n depth_frame = self._filter_depth_frame(depth_frame)\n depth_image = self._to_numpy(depth_frame, np.float32)\n color_image = self._to_numpy(color_frame, np.uint8)\n depth_image *= self._depth_scale\n color_image = color_image[..., ::-1]\n depth = DepthImage(depth_image, frame=self._frame)\n color = ColorImage(color_image, frame=self._frame)\n return color, depth\n",
"role": "user"
},
{
"content": "read_color_and_depth_image",
"role": "assistant"
}
] |
def x(self):
self._cap = cv2.VideoCapture(self._device_id + cv2.CAP_V4L2)
if not self._cap.isOpened():
self._running = False
self._cap.release()
self._cap = None
return False
self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self._camera_intr.width)
self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self._camera_intr.height)
self._running = True
for _ in range(5):
_ = self.frames()
return True
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self._cap = cv2.VideoCapture(self._device_id + cv2.CAP_V4L2)\n if not self._cap.isOpened():\n self._running = False\n self._cap.release()\n self._cap = None\n return False\n self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self._camera_intr.width)\n self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self._camera_intr.height)\n self._running = True\n for _ in range(5):\n _ = self.frames()\n return True\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running:
logging.warning('Webcam not running. Aborting stop')
return False
if self._cap:
self._cap.release()
self._cap = None
self._running = False
return True
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n logging.warning('Webcam not running. Aborting stop')\n return False\n if self._cap:\n self._cap.release()\n self._cap = None\n self._running = False\n return True\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self, most_recent=False):
if most_recent:
for i in xrange(4):
self._cap.grab()
for i in range(1):
if self._adjust_exposure:
try:
command = (
'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'
.format(self._device_id))
FNULL = open(os.devnull, 'w')
subprocess.call(shlex.split(command), stdout=FNULL, stderr=
subprocess.STDOUT)
except:
pass
ret, frame = self._cap.read()
rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return ColorImage(rgb_data, frame=self._frame), None, None
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, most_recent=False):\n if most_recent:\n for i in xrange(4):\n self._cap.grab()\n for i in range(1):\n if self._adjust_exposure:\n try:\n command = (\n 'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'\n .format(self._device_id))\n FNULL = open(os.devnull, 'w')\n subprocess.call(shlex.split(command), stdout=FNULL, stderr=\n subprocess.STDOUT)\n except:\n pass\n ret, frame = self._cap.read()\n rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n return ColorImage(rgb_data, frame=self._frame), None, None\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self, msg):
num_points = msg.height * msg.width
self._format = '<' + num_points * 'ffff'
|
set_format
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, msg):\n num_points = msg.height * msg.width\n self._format = '<' + num_points * 'ffff'\n",
"role": "user"
},
{
"content": "set_format",
"role": "assistant"
}
] |
def x(self, msg):
focal_x = msg.K[0]
focal_y = msg.K[4]
center_x = msg.K[2]
center_y = msg.K[5]
im_height = msg.height
im_width = msg.width
self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,
center_x, center_y, height=im_height, width=im_width)
|
set_camera_properties
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, msg):\n focal_x = msg.K[0]\n focal_y = msg.K[4]\n center_x = msg.K[2]\n center_y = msg.K[5]\n im_height = msg.height\n im_width = msg.width\n self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,\n center_x, center_y, height=im_height, width=im_width)\n",
"role": "user"
},
{
"content": "set_camera_properties",
"role": "assistant"
}
] |
def x(self, msg):
if self._format is None:
self._set_format(msg)
if msg.height != self._camera_intr.height:
rescale_factor = float(msg.height) / self._camera_intr.height
self._camera_intr = self._camera_intr.resize(rescale_factor)
num_points = msg.height * msg.width
raw_tup = struct.Struct(self._format).unpack_from(msg.data, 0)
raw_arr = np.array(raw_tup)
depth_ind = 2 + 4 * np.arange(num_points)
depth_buf = raw_arr[depth_ind]
depth_arr = depth_buf.reshape(msg.height, msg.width)
depth_im = DepthImage(depth_arr, frame=self._frame)
return depth_im
|
depth_im_from_pointcloud
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, msg):\n if self._format is None:\n self._set_format(msg)\n if msg.height != self._camera_intr.height:\n rescale_factor = float(msg.height) / self._camera_intr.height\n self._camera_intr = self._camera_intr.resize(rescale_factor)\n num_points = msg.height * msg.width\n raw_tup = struct.Struct(self._format).unpack_from(msg.data, 0)\n raw_arr = np.array(raw_tup)\n depth_ind = 2 + 4 * np.arange(num_points)\n depth_buf = raw_arr[depth_ind]\n depth_arr = depth_buf.reshape(msg.height, msg.width)\n depth_im = DepthImage(depth_arr, frame=self._frame)\n return depth_im\n",
"role": "user"
},
{
"content": "depth_im_from_pointcloud",
"role": "assistant"
}
] |
def x(self):
self._pointcloud_sub = rospy.Subscriber('/%s/depth/points' % self.frame,
PointCloud2, self._pointcloud_callback)
self._camera_info_sub = rospy.Subscriber('/%s/left/camera_info' % self.
frame, CameraInfo, self._camera_info_callback)
while self._camera_intr is None:
time.sleep(0.1)
self._running = True
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self._pointcloud_sub = rospy.Subscriber('/%s/depth/points' % self.frame,\n PointCloud2, self._pointcloud_callback)\n self._camera_info_sub = rospy.Subscriber('/%s/left/camera_info' % self.\n frame, CameraInfo, self._camera_info_callback)\n while self._camera_intr is None:\n time.sleep(0.1)\n self._running = True\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running:
logging.warning('Ensenso not running. Aborting stop')
return False
self._pointcloud_sub.unregister()
self._camera_info_sub.unregister()
self._running = False
return True
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n logging.warning('Ensenso not running. Aborting stop')\n return False\n self._pointcloud_sub.unregister()\n self._camera_info_sub.unregister()\n self._running = False\n return True\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self):
while self._cur_depth_im is None:
time.sleep(0.01)
depth_im = self._cur_depth_im
color_im = ColorImage(np.zeros([depth_im.height, depth_im.width, 3]).
astype(np.uint8), frame=self._frame)
self._cur_depth_im = None
return color_im, depth_im, None
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n while self._cur_depth_im is None:\n time.sleep(0.01)\n depth_im = self._cur_depth_im\n color_im = ColorImage(np.zeros([depth_im.height, depth_im.width, 3]).\n astype(np.uint8), frame=self._frame)\n self._cur_depth_im = None\n return color_im, depth_im, None\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self, source_point_cloud, target_point_cloud, source_normal_cloud,
target_normal_cloud, matcher, num_iterations=1, compute_total_cost=True,
match_centroids=False, vis=False):
pass
|
register
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, source_point_cloud, target_point_cloud, source_normal_cloud,\n target_normal_cloud, matcher, num_iterations=1, compute_total_cost=True,\n match_centroids=False, vis=False):\n pass\n",
"role": "user"
},
{
"content": "register",
"role": "assistant"
}
] |
def x(self):
if not self._running:
raise RuntimeError(
'Device pointing to %s not runnning. Cannot read frames' % self
._path_to_images)
if self._im_index >= self._num_images:
raise RuntimeError('Device is out of images')
color_filename = os.path.join(self._path_to_images, 'color_%d%s' % (
self._im_index, self._color_ext))
color_im = ColorImage.open(color_filename, frame=self._frame)
depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %
self._im_index)
depth_im = DepthImage.open(depth_filename, frame=self._frame)
self._im_index = (self._im_index + 1) % self._num_images
return color_im, depth_im, None
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n raise RuntimeError(\n 'Device pointing to %s not runnning. Cannot read frames' % self\n ._path_to_images)\n if self._im_index >= self._num_images:\n raise RuntimeError('Device is out of images')\n color_filename = os.path.join(self._path_to_images, 'color_%d%s' % (\n self._im_index, self._color_ext))\n color_im = ColorImage.open(color_filename, frame=self._frame)\n depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %\n self._im_index)\n depth_im = DepthImage.open(depth_filename, frame=self._frame)\n self._im_index = (self._im_index + 1) % self._num_images\n return color_im, depth_im, None\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self, num_img=1):
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
return Image.median_images(depths)
|
median_depth_img
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_img=1):\n depths = []\n for _ in range(num_img):\n _, depth, _ = self.frames()\n depths.append(depth)\n return Image.median_images(depths)\n",
"role": "user"
},
{
"content": "median_depth_img",
"role": "assistant"
}
] |
def x(self):
if not self._running:
raise RuntimeError(
'Device pointing to %s not runnning. Cannot read frames' % self
._path_to_images)
if self._im_index >= self._num_images:
raise RuntimeError('Device is out of images')
datapoint = self._dataset.datapoint(self._im_index,
TensorDatasetVirtualSensor.IMAGE_FIELDS)
color_im = ColorImage(datapoint[TensorDatasetVirtualSensor.
COLOR_IM_FIELD], frame=self._frame)
depth_im = DepthImage(datapoint[TensorDatasetVirtualSensor.
DEPTH_IM_FIELD], frame=self._frame)
if self._image_rescale_factor != 1.0:
color_im = color_im.resize(self._image_rescale_factor)
depth_im = depth_im.resize(self._image_rescale_factor, interp='nearest'
)
self._im_index = (self._im_index + 1) % self._num_images
return color_im, depth_im, None
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n raise RuntimeError(\n 'Device pointing to %s not runnning. Cannot read frames' % self\n ._path_to_images)\n if self._im_index >= self._num_images:\n raise RuntimeError('Device is out of images')\n datapoint = self._dataset.datapoint(self._im_index,\n TensorDatasetVirtualSensor.IMAGE_FIELDS)\n color_im = ColorImage(datapoint[TensorDatasetVirtualSensor.\n COLOR_IM_FIELD], frame=self._frame)\n depth_im = DepthImage(datapoint[TensorDatasetVirtualSensor.\n DEPTH_IM_FIELD], frame=self._frame)\n if self._image_rescale_factor != 1.0:\n color_im = color_im.resize(self._image_rescale_factor)\n depth_im = depth_im.resize(self._image_rescale_factor, interp='nearest'\n )\n self._im_index = (self._im_index + 1) % self._num_images\n return color_im, depth_im, None\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self):
return CameraIntrinsics(self._ir_frame, PrimesenseSensor.FOCAL_X,
PrimesenseSensor.FOCAL_Y, PrimesenseSensor.CENTER_X,
PrimesenseSensor.CENTER_Y, height=PrimesenseSensor.DEPTH_IM_HEIGHT,
width=PrimesenseSensor.DEPTH_IM_WIDTH)
|
color_intrinsics
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return CameraIntrinsics(self._ir_frame, PrimesenseSensor.FOCAL_X,\n PrimesenseSensor.FOCAL_Y, PrimesenseSensor.CENTER_X,\n PrimesenseSensor.CENTER_Y, height=PrimesenseSensor.DEPTH_IM_HEIGHT,\n width=PrimesenseSensor.DEPTH_IM_WIDTH)\n",
"role": "user"
},
{
"content": "color_intrinsics",
"role": "assistant"
}
] |
def x(self):
openni2.initialize(PrimesenseSensor.OPENNI2_PATH)
self._device = openni2.Device.open_any()
self._depth_stream = self._device.create_depth_stream()
self._depth_stream.configure_mode(PrimesenseSensor.DEPTH_IM_WIDTH,
PrimesenseSensor.DEPTH_IM_HEIGHT, PrimesenseSensor.FPS, openni2.
PIXEL_FORMAT_DEPTH_1_MM)
self._depth_stream.start()
self._color_stream = self._device.create_color_stream()
self._color_stream.configure_mode(PrimesenseSensor.COLOR_IM_WIDTH,
PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.FPS, openni2.
PIXEL_FORMAT_RGB888)
self._color_stream.camera.set_auto_white_balance(self._auto_white_balance)
self._color_stream.camera.set_auto_exposure(self._auto_exposure)
self._color_stream.start()
if self._registration_mode == PrimesenseRegistrationMode.DEPTH_TO_COLOR:
self._device.set_image_registration_mode(openni2.
IMAGE_REGISTRATION_DEPTH_TO_COLOR)
else:
self._device.set_image_registration_mode(openni2.IMAGE_REGISTRATION_OFF
)
self._device.set_depth_color_sync_enabled(self._enable_depth_color_sync)
self._running = True
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n openni2.initialize(PrimesenseSensor.OPENNI2_PATH)\n self._device = openni2.Device.open_any()\n self._depth_stream = self._device.create_depth_stream()\n self._depth_stream.configure_mode(PrimesenseSensor.DEPTH_IM_WIDTH,\n PrimesenseSensor.DEPTH_IM_HEIGHT, PrimesenseSensor.FPS, openni2.\n PIXEL_FORMAT_DEPTH_1_MM)\n self._depth_stream.start()\n self._color_stream = self._device.create_color_stream()\n self._color_stream.configure_mode(PrimesenseSensor.COLOR_IM_WIDTH,\n PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.FPS, openni2.\n PIXEL_FORMAT_RGB888)\n self._color_stream.camera.set_auto_white_balance(self._auto_white_balance)\n self._color_stream.camera.set_auto_exposure(self._auto_exposure)\n self._color_stream.start()\n if self._registration_mode == PrimesenseRegistrationMode.DEPTH_TO_COLOR:\n self._device.set_image_registration_mode(openni2.\n IMAGE_REGISTRATION_DEPTH_TO_COLOR)\n else:\n self._device.set_image_registration_mode(openni2.IMAGE_REGISTRATION_OFF\n )\n self._device.set_depth_color_sync_enabled(self._enable_depth_color_sync)\n self._running = True\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running or self._device is None:
logging.warning('Primesense not running. Aborting stop')
return False
if self._depth_stream:
self._depth_stream.stop()
if self._color_stream:
self._color_stream.stop()
self._running = False
openni2.unload()
return True
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running or self._device is None:\n logging.warning('Primesense not running. Aborting stop')\n return False\n if self._depth_stream:\n self._depth_stream.stop()\n if self._color_stream:\n self._color_stream.stop()\n self._running = False\n openni2.unload()\n return True\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self):
im_arr = self._depth_stream.read_frame()
raw_buf = im_arr.get_buffer_as_uint16()
buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.
DEPTH_IM_WIDTH * PrimesenseSensor.DEPTH_IM_HEIGHT)])
depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,
PrimesenseSensor.DEPTH_IM_WIDTH)
depth_image = depth_image * MM_TO_METERS
if self._flip_images:
depth_image = np.flipud(depth_image)
else:
depth_image = np.fliplr(depth_image)
return DepthImage(depth_image, frame=self._frame)
|
read_depth_image
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n im_arr = self._depth_stream.read_frame()\n raw_buf = im_arr.get_buffer_as_uint16()\n buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.\n DEPTH_IM_WIDTH * PrimesenseSensor.DEPTH_IM_HEIGHT)])\n depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,\n PrimesenseSensor.DEPTH_IM_WIDTH)\n depth_image = depth_image * MM_TO_METERS\n if self._flip_images:\n depth_image = np.flipud(depth_image)\n else:\n depth_image = np.fliplr(depth_image)\n return DepthImage(depth_image, frame=self._frame)\n",
"role": "user"
},
{
"content": "read_depth_image",
"role": "assistant"
}
] |
def x(self):
im_arr = self._color_stream.read_frame()
raw_buf = im_arr.get_buffer_as_triplet()
r_array = np.array([raw_buf[i][0] for i in range(PrimesenseSensor.
COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.
COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.
COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH, 3])
color_image[:, :, 0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
color_image[:, :, 1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
color_image[:, :, 2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
if self._flip_images:
color_image = np.flipud(color_image.astype(np.uint8))
else:
color_image = np.fliplr(color_image.astype(np.uint8))
return ColorImage(color_image, frame=self._frame)
|
read_color_image
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n im_arr = self._color_stream.read_frame()\n raw_buf = im_arr.get_buffer_as_triplet()\n r_array = np.array([raw_buf[i][0] for i in range(PrimesenseSensor.\n COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])\n g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.\n COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])\n b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.\n COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])\n color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH, 3])\n color_image[:, :, 0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH)\n color_image[:, :, 1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH)\n color_image[:, :, 2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH)\n if self._flip_images:\n color_image = np.flipud(color_image.astype(np.uint8))\n else:\n color_image = np.fliplr(color_image.astype(np.uint8))\n return ColorImage(color_image, frame=self._frame)\n",
"role": "user"
},
{
"content": "read_color_image",
"role": "assistant"
}
] |
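The two Primesense reader entries above (`read_depth_image` and `read_color_image`) rebuild image arrays from flat OpenNI2 buffers with per-pixel Python list comprehensions. As a minimal standalone sketch, assuming synthetic flat buffers and a 640x480 resolution in place of the real OpenNI2 frame objects, the same reshaping can be done with vectorized NumPy:

import numpy as np

H, W = 480, 640            # assumed Primesense depth/color resolution
MM_TO_METERS = 0.001

# Synthetic stand-ins for the flat OpenNI2 buffers (not real sensor data).
flat_depth_mm = np.random.randint(0, 5000, size=H * W).astype(np.uint16)
flat_rgb_triplets = np.random.randint(0, 256, size=(H * W, 3)).astype(np.uint8)

# Depth: reshape the flat millimeter buffer, convert to meters, mirror horizontally.
depth_m = flat_depth_mm.reshape(H, W).astype(np.float32) * MM_TO_METERS
depth_m = np.fliplr(depth_m)

# Color: one reshape replaces the three per-channel list comprehensions above.
color = flat_rgb_triplets.reshape(H, W, 3)
color = np.fliplr(color)

print(depth_m.shape, depth_m.dtype, color.shape, color.dtype)

The reshape-based form avoids an O(H*W) Python loop per channel, which matters when frames arrive at camera rate.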
def x(self):
color_im = self._read_color_image()
depth_im = self._read_depth_image()
return color_im, depth_im, None
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n color_im = self._read_color_image()\n depth_im = self._read_depth_image()\n return color_im, depth_im, None\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self, num_img=1):
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
return Image.min_images(depths)
|
min_depth_img
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_img=1):\n depths = []\n for _ in range(num_img):\n _, depth, _ = self.frames()\n depths.append(depth)\n return Image.min_images(depths)\n",
"role": "user"
},
{
"content": "min_depth_img",
"role": "assistant"
}
] |
def x(self, stream_buffer, number, staleness_limit=10.0):
rospy.wait_for_service(stream_buffer, timeout=self.timeout)
ros_image_buffer = rospy.ServiceProxy(stream_buffer, ImageBuffer)
ret = ros_image_buffer(number, 1)
if not staleness_limit == None:
if ret.timestamps[-1] > staleness_limit:
raise RuntimeError(
'Got data {0} seconds old, more than allowed {1} seconds'.
format(ret.timestamps[-1], staleness_limit))
data = ret.data.reshape(ret.data_dim1, ret.data_dim2, ret.data_dim3
).astype(ret.dtype)
if number == 1:
return [data]
return np.dsplit(data, number)
|
ros_read_images
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, stream_buffer, number, staleness_limit=10.0):\n rospy.wait_for_service(stream_buffer, timeout=self.timeout)\n ros_image_buffer = rospy.ServiceProxy(stream_buffer, ImageBuffer)\n ret = ros_image_buffer(number, 1)\n if not staleness_limit == None:\n if ret.timestamps[-1] > staleness_limit:\n raise RuntimeError(\n 'Got data {0} seconds old, more than allowed {1} seconds'.\n format(ret.timestamps[-1], staleness_limit))\n data = ret.data.reshape(ret.data_dim1, ret.data_dim2, ret.data_dim3\n ).astype(ret.dtype)\n if number == 1:\n return [data]\n return np.dsplit(data, number)\n",
"role": "user"
},
{
"content": "ros_read_images",
"role": "assistant"
}
] |
def x(self, num_images):
depth_images = self._ros_read_images(self._depth_image_buffer,
num_images, self.staleness_limit)
for i in range(0, num_images):
depth_images[i] = depth_images[i] * MM_TO_METERS
if self._flip_images:
depth_images[i] = np.flipud(depth_images[i])
depth_images[i] = np.fliplr(depth_images[i])
depth_images[i] = DepthImage(depth_images[i], frame=self._frame)
return depth_images
|
read_depth_images
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_images):\n depth_images = self._ros_read_images(self._depth_image_buffer,\n num_images, self.staleness_limit)\n for i in range(0, num_images):\n depth_images[i] = depth_images[i] * MM_TO_METERS\n if self._flip_images:\n depth_images[i] = np.flipud(depth_images[i])\n depth_images[i] = np.fliplr(depth_images[i])\n depth_images[i] = DepthImage(depth_images[i], frame=self._frame)\n return depth_images\n",
"role": "user"
},
{
"content": "read_depth_images",
"role": "assistant"
}
] |
def x(self, num_images):
color_images = self._ros_read_images(self._color_image_buffer,
num_images, self.staleness_limit)
for i in range(0, num_images):
if self._flip_images:
color_images[i] = np.flipud(color_images[i].astype(np.uint8))
color_images[i] = np.fliplr(color_images[i].astype(np.uint8))
color_images[i] = ColorImage(color_images[i], frame=self._frame)
return color_images
|
read_color_images
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_images):\n color_images = self._ros_read_images(self._color_image_buffer,\n num_images, self.staleness_limit)\n for i in range(0, num_images):\n if self._flip_images:\n color_images[i] = np.flipud(color_images[i].astype(np.uint8))\n color_images[i] = np.fliplr(color_images[i].astype(np.uint8))\n color_images[i] = ColorImage(color_images[i], frame=self._frame)\n return color_images\n",
"role": "user"
},
{
"content": "read_color_images",
"role": "assistant"
}
] |
def x(self, num_img=1, fill_depth=0.0):
depths = self._read_depth_images(num_img)
median_depth = Image.median_images(depths)
median_depth.data[median_depth.data == 0.0] = fill_depth
return median_depth
|
median_depth_img
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_img=1, fill_depth=0.0):\n depths = self._read_depth_images(num_img)\n median_depth = Image.median_images(depths)\n median_depth.data[median_depth.data == 0.0] = fill_depth\n return median_depth\n",
"role": "user"
},
{
"content": "median_depth_img",
"role": "assistant"
}
] |
def x(self, num_img=1):
depths = self._read_depth_images(num_img)
return Image.min_images(depths)
|
min_depth_img
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, num_img=1):\n depths = self._read_depth_images(num_img)\n return Image.min_images(depths)\n",
"role": "user"
},
{
"content": "min_depth_img",
"role": "assistant"
}
] |
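The `median_depth_img` and `min_depth_img` entries above composite a burst of depth frames through the library's `Image.median_images` / `Image.min_images` helpers and, in the median case, fill zero-valued (missing) pixels. A minimal NumPy-only sketch of that compositing, with made-up frames and fill value:

import numpy as np

def composite_depth(frames, mode="median", fill_depth=0.0):
    """Composite a list of HxW depth arrays and fill missing (zero) pixels."""
    stack = np.stack(frames, axis=0)              # (N, H, W)
    out = np.median(stack, axis=0) if mode == "median" else np.min(stack, axis=0)
    out[out == 0.0] = fill_depth                  # zero-fill, as in median_depth_img
    return out

# Three synthetic 4x4 depth frames with one simulated dropout pixel.
frames = [np.random.uniform(0.4, 1.2, size=(4, 4)) for _ in range(3)]
for f in frames:
    f[0, 0] = 0.0
print(composite_depth(frames, mode="median", fill_depth=0.75))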
def x(self):
if self.stable_pose is None:
T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
else:
T_obj_world = self.stable_pose.T_obj_table.as_frames('obj', 'world')
T_camera_obj = T_obj_world.inverse() * self.T_camera_world
return T_camera_obj
|
T_obj_camera
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.stable_pose is None:\n T_obj_world = RigidTransform(from_frame='obj', to_frame='world')\n else:\n T_obj_world = self.stable_pose.T_obj_table.as_frames('obj', 'world')\n T_camera_obj = T_obj_world.inverse() * self.T_camera_world\n return T_camera_obj\n",
"role": "user"
},
{
"content": "T_obj_camera",
"role": "assistant"
}
] |
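The `T_obj_camera` entry above composes the library's `RigidTransform` objects as `T_obj_world.inverse() * T_camera_world`. A plain 4x4 homogeneous-matrix sketch of the same composition, with placeholder rotations and translations:

import numpy as np

def make_tf(R, t):
    """Build a 4x4 homogeneous transform from a rotation matrix and translation."""
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T

# Placeholder poses (illustrative values only).
T_obj_world = make_tf(np.eye(3), [0.0, 0.0, 0.1])       # object 0.1 m above world origin
T_camera_world = make_tf(np.eye(3), [0.3, 0.0, 0.5])    # camera pose in the world frame

# Same composition as T_obj_world.inverse() * T_camera_world in the entry above.
T_camera_obj = np.linalg.inv(T_obj_world) @ T_camera_world
print(T_camera_obj)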
def x(self, render_mode):
if render_mode == RenderMode.COLOR:
return self.color_im
elif render_mode == RenderMode.DEPTH:
return self.depth_im
elif render_mode == RenderMode.SEGMASK:
return self.binary_im
else:
return None
|
image
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, render_mode):\n if render_mode == RenderMode.COLOR:\n return self.color_im\n elif render_mode == RenderMode.DEPTH:\n return self.depth_im\n elif render_mode == RenderMode.SEGMASK:\n return self.binary_im\n else:\n return None\n",
"role": "user"
},
{
"content": "image",
"role": "assistant"
}
] |
def x(self, feature):
self.features_.append(feature)
self.num_features_ = len(self.features_)
|
add
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, feature):\n self.features_.append(feature)\n self.num_features_ = len(self.features_)\n",
"role": "user"
},
{
"content": "add",
"role": "assistant"
}
] |
def x(self, features):
self.features_.extend(features)
self.num_features_ = len(self.features_)
|
extend
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, features):\n self.features_.extend(features)\n self.num_features_ = len(self.features_)\n",
"role": "user"
},
{
"content": "extend",
"role": "assistant"
}
] |
def x(self, index):
if index < 0 or index >= self.num_features_:
raise ValueError('Index %d out of range' % index)
return self.features_[index]
|
feature
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, index):\n if index < 0 or index >= self.num_features_:\n raise ValueError('Index %d out of range' % index)\n return self.features_[index]\n",
"role": "user"
},
{
"content": "feature",
"role": "assistant"
}
] |
def x(self, indices):
if isinstance(indices, np.ndarray):
indices = indices.tolist()
if not isinstance(indices, list):
raise ValueError('Can only index with lists')
return [self.features_[i] for i in indices]
|
feature_subset
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, indices):\n if isinstance(indices, np.ndarray):\n indices = indices.tolist()\n if not isinstance(indices, list):\n raise ValueError('Can only index with lists')\n return [self.features_[i] for i in indices]\n",
"role": "user"
},
{
"content": "feature_subset",
"role": "assistant"
}
] |
def x(self):
if rospy.get_name() == '/unnamed':
raise ValueError('Weight sensor must be run inside a ros node!')
self._weight_subscriber = rospy.Subscriber('weight_sensor/weights',
Float32MultiArray, self._weights_callback)
self._running = True
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if rospy.get_name() == '/unnamed':\n raise ValueError('Weight sensor must be run inside a ros node!')\n self._weight_subscriber = rospy.Subscriber('weight_sensor/weights',\n Float32MultiArray, self._weights_callback)\n self._running = True\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running:
return
self._weight_subscriber.unregister()
self._running = False
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running:\n return\n self._weight_subscriber.unregister()\n self._running = False\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self):
weights = self._raw_weights()
if weights.shape[1] == 0:
return 0.0
elif weights.shape[1] < self._ntaps:
return np.sum(np.mean(weights, axis=1))
else:
return self._filter_coeffs.dot(np.sum(weights, axis=0))
|
total_weight
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n weights = self._raw_weights()\n if weights.shape[1] == 0:\n return 0.0\n elif weights.shape[1] < self._ntaps:\n return np.sum(np.mean(weights, axis=1))\n else:\n return self._filter_coeffs.dot(np.sum(weights, axis=0))\n",
"role": "user"
},
{
"content": "total_weight",
"role": "assistant"
}
] |
def x(self):
weights = self._raw_weights()
if weights.shape[1] == 0:
return np.zeros(weights.shape[0])
elif weights.shape[1] < self._ntaps:
return np.mean(weights, axis=1)
else:
return weights.dot(self._filter_coeffs)
|
individual_weights
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n weights = self._raw_weights()\n if weights.shape[1] == 0:\n return np.zeros(weights.shape[0])\n elif weights.shape[1] < self._ntaps:\n return np.mean(weights, axis=1)\n else:\n return weights.dot(self._filter_coeffs)\n",
"role": "user"
},
{
"content": "individual_weights",
"role": "assistant"
}
] |
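The two weight-sensor entries above (`total_weight` and `individual_weights`) filter a `(num_load_cells, ntaps)` buffer of readings with FIR coefficients: per-cell estimates come from `weights.dot(coeffs)` and the total from `coeffs.dot(weights.sum(axis=0))`. A NumPy sketch with a simple moving-average filter standing in for `_filter_coeffs` (the tap count and readings are made up):

import numpy as np

ntaps = 5
coeffs = np.full(ntaps, 1.0 / ntaps)          # moving-average stand-in for _filter_coeffs

# Synthetic buffer: 4 load cells x ntaps most-recent readings (grams, invented).
weights = np.array([
    [101.0, 99.5, 100.2, 100.8, 99.9],
    [ 50.1, 49.8,  50.0,  50.3, 49.9],
    [  0.2,  0.1,   0.0,   0.1,  0.2],
    [ 25.0, 24.7,  25.1,  24.9, 25.2],
])

per_cell = weights.dot(coeffs)                # mirrors individual_weights
total = coeffs.dot(weights.sum(axis=0))       # mirrors total_weight
print(per_cell, total)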
def x(self):
if self._debug:
return np.array([[], [], [], []])
if not self._running:
raise ValueError('Weight sensor is not running!')
if len(self._weight_buffers) == 0:
time.sleep(0.3)
if len(self._weight_buffers) == 0:
raise ValueError('Weight sensor is not retrieving data!')
weights = np.array(self._weight_buffers)
return weights
|
raw_weights
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._debug:\n return np.array([[], [], [], []])\n if not self._running:\n raise ValueError('Weight sensor is not running!')\n if len(self._weight_buffers) == 0:\n time.sleep(0.3)\n if len(self._weight_buffers) == 0:\n raise ValueError('Weight sensor is not retrieving data!')\n weights = np.array(self._weight_buffers)\n return weights\n",
"role": "user"
},
{
"content": "raw_weights",
"role": "assistant"
}
] |
def x(self, msg):
weights = np.array(msg.data)
if len(self._weight_buffers) == 0:
self._weight_buffers = [[] for i in range(len(weights))]
for i, w in enumerate(weights):
if len(self._weight_buffers[i]) == self._ntaps:
self._weight_buffers[i].pop(0)
self._weight_buffers[i].append(w)
|
weights_callback
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, msg):\n weights = np.array(msg.data)\n if len(self._weight_buffers) == 0:\n self._weight_buffers = [[] for i in range(len(weights))]\n for i, w in enumerate(weights):\n if len(self._weight_buffers[i]) == self._ntaps:\n self._weight_buffers[i].pop(0)\n self._weight_buffers[i].append(w)\n",
"role": "user"
},
{
"content": "weights_callback",
"role": "assistant"
}
] |
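The `weights_callback` entry above keeps a fixed-length history per channel by popping the oldest sample by hand. A sketch of the same bounded-history bookkeeping using `collections.deque(maxlen=...)`, which evicts old samples automatically (the channel count and incoming values are invented):

from collections import deque

import numpy as np

ntaps = 4
buffers = []  # one deque per load cell, created lazily like _weight_buffers

def on_weights(msg_data):
    """Append one reading per channel, keeping only the newest ntaps samples."""
    global buffers
    if not buffers:
        buffers = [deque(maxlen=ntaps) for _ in msg_data]
    for buf, w in zip(buffers, msg_data):
        buf.append(w)

for k in range(6):                                   # simulate six incoming messages
    on_weights([100.0 + k, 50.0 - k])
print(np.array([list(b) for b in buffers]))          # (2, ntaps): only the newest samples remain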
def x(self):
return np.r_[self.fx, self.fy, self.cx, self.cy, self.skew, self.height,
self.width]
|
vec
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return np.r_[self.fx, self.fy, self.cx, self.cy, self.skew, self.height,\n self.width]\n",
"role": "user"
},
{
"content": "vec",
"role": "assistant"
}
] |
def x(self):
from sensor_msgs.msg import CameraInfo, RegionOfInterest
from std_msgs.msg import Header
msg_header = Header()
msg_header.frame_id = self._frame
msg_roi = RegionOfInterest()
msg_roi.x_offset = 0
msg_roi.y_offset = 0
msg_roi.height = 0
msg_roi.width = 0
msg_roi.do_rectify = 0
msg = CameraInfo()
msg.header = msg_header
msg.height = self._height
msg.width = self._width
msg.distortion_model = 'plumb_bob'
msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
msg.K = [self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0]
msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
msg.P = [self._fx, 0.0, self._cx, 0.0, 0.0, self._fx, self._cy, 0.0,
0.0, 0.0, 1.0, 0.0]
msg.binning_x = 0
msg.binning_y = 0
msg.roi = msg_roi
return msg
|
rosmsg
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n from sensor_msgs.msg import CameraInfo, RegionOfInterest\n from std_msgs.msg import Header\n msg_header = Header()\n msg_header.frame_id = self._frame\n msg_roi = RegionOfInterest()\n msg_roi.x_offset = 0\n msg_roi.y_offset = 0\n msg_roi.height = 0\n msg_roi.width = 0\n msg_roi.do_rectify = 0\n msg = CameraInfo()\n msg.header = msg_header\n msg.height = self._height\n msg.width = self._width\n msg.distortion_model = 'plumb_bob'\n msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]\n msg.K = [self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0]\n msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n msg.P = [self._fx, 0.0, self._cx, 0.0, 0.0, self._fx, self._cy, 0.0, \n 0.0, 0.0, 1.0, 0.0]\n msg.binning_x = 0\n msg.binning_y = 0\n msg.roi = msg_roi\n return msg\n",
"role": "user"
},
{
"content": "rosmsg",
"role": "assistant"
}
] |
def x(self, height, width, crop_ci, crop_cj):
cx = self.cx + float(width - 1) / 2 - crop_cj
cy = self.cy + float(height - 1) / 2 - crop_ci
cropped_intrinsics = CameraIntrinsics(frame=self.frame, fx=self.fx, fy=
self.fy, skew=self.skew, cx=cx, cy=cy, height=height, width=width)
return cropped_intrinsics
|
crop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, height, width, crop_ci, crop_cj):\n cx = self.cx + float(width - 1) / 2 - crop_cj\n cy = self.cy + float(height - 1) / 2 - crop_ci\n cropped_intrinsics = CameraIntrinsics(frame=self.frame, fx=self.fx, fy=\n self.fy, skew=self.skew, cx=cx, cy=cy, height=height, width=width)\n return cropped_intrinsics\n",
"role": "user"
},
{
"content": "crop",
"role": "assistant"
}
] |
def x(self, scale):
center_x = float(self.width - 1) / 2
center_y = float(self.height - 1) / 2
orig_cx_diff = self.cx - center_x
orig_cy_diff = self.cy - center_y
height = scale * self.height
width = scale * self.width
scaled_center_x = float(width - 1) / 2
scaled_center_y = float(height - 1) / 2
fx = scale * self.fx
fy = scale * self.fy
skew = scale * self.skew
cx = scaled_center_x + scale * orig_cx_diff
cy = scaled_center_y + scale * orig_cy_diff
scaled_intrinsics = CameraIntrinsics(frame=self.frame, fx=fx, fy=fy,
skew=skew, cx=cx, cy=cy, height=height, width=width)
return scaled_intrinsics
|
resize
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, scale):\n center_x = float(self.width - 1) / 2\n center_y = float(self.height - 1) / 2\n orig_cx_diff = self.cx - center_x\n orig_cy_diff = self.cy - center_y\n height = scale * self.height\n width = scale * self.width\n scaled_center_x = float(width - 1) / 2\n scaled_center_y = float(height - 1) / 2\n fx = scale * self.fx\n fy = scale * self.fy\n skew = scale * self.skew\n cx = scaled_center_x + scale * orig_cx_diff\n cy = scaled_center_y + scale * orig_cy_diff\n scaled_intrinsics = CameraIntrinsics(frame=self.frame, fx=fx, fy=fy,\n skew=skew, cx=cx, cy=cy, height=height, width=width)\n return scaled_intrinsics\n",
"role": "user"
},
{
"content": "resize",
"role": "assistant"
}
] |
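The `crop` and `resize` entries above adjust pinhole intrinsics when the image is cropped or uniformly rescaled: focal lengths scale with the image, and the principal point shifts with the crop center or with its scaled offset from the image center. A plain-Python sketch of both adjustments on illustrative numbers (not taken from any real calibration):

# Illustrative intrinsics for a 640x480 image (made-up calibration values).
fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5
height, width = 480, 640

# Center crop to 256x256 around pixel (row=200, col=350), as in crop():
crop_h, crop_w, crop_ci, crop_cj = 256, 256, 200, 350
cx_crop = cx + (crop_w - 1) / 2.0 - crop_cj
cy_crop = cy + (crop_h - 1) / 2.0 - crop_ci

# Uniform rescale by 0.5, as in resize(): focal lengths scale, and the
# principal-point offset from the image center scales with the image.
scale = 0.5
fx_s, fy_s = scale * fx, scale * fy
cx_s = (scale * width - 1) / 2.0 + scale * (cx - (width - 1) / 2.0)
cy_s = (scale * height - 1) / 2.0 + scale * (cy - (height - 1) / 2.0)

print((cx_crop, cy_crop), (fx_s, fy_s, cx_s, cy_s))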
def x(self, point_cloud, round_px=True):
if not isinstance(point_cloud, PointCloud) and not (isinstance(
point_cloud, Point) and point_cloud.dim == 3):
raise ValueError(
'Must provide PointCloud or 3D Point object for projection')
if point_cloud.frame != self._frame:
raise ValueError(
'Cannot project points in frame %s into camera with frame %s' %
(point_cloud.frame, self._frame))
points_proj = self._K.dot(point_cloud.data)
if len(points_proj.shape) == 1:
points_proj = points_proj[:, np.newaxis]
point_depths = np.tile(points_proj[2, :], [3, 1])
points_proj = np.divide(points_proj, point_depths)
if round_px:
points_proj = np.round(points_proj)
if isinstance(point_cloud, Point):
return Point(data=points_proj[:2, :].astype(np.int16), frame=self.
_frame)
return ImageCoords(data=points_proj[:2, :].astype(np.int16), frame=self
._frame)
|
project
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(\n point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError(\n 'Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError(\n 'Cannot project points in frame %s into camera with frame %s' %\n (point_cloud.frame, self._frame))\n points_proj = self._K.dot(point_cloud.data)\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = np.tile(points_proj[2, :], [3, 1])\n points_proj = np.divide(points_proj, point_depths)\n if round_px:\n points_proj = np.round(points_proj)\n if isinstance(point_cloud, Point):\n return Point(data=points_proj[:2, :].astype(np.int16), frame=self.\n _frame)\n return ImageCoords(data=points_proj[:2, :].astype(np.int16), frame=self\n ._frame)\n",
"role": "user"
},
{
"content": "project",
"role": "assistant"
}
] |
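The `project` entry above maps camera-frame 3D points to pixels by multiplying with the intrinsics matrix K and dividing by depth. A self-contained NumPy sketch of that projection on synthetic points (K and the points are made up; the real method additionally checks frames and wraps results in `Point`/`ImageCoords`):

import numpy as np

# Made-up intrinsics matrix for illustration.
K = np.array([[525.0,   0.0, 319.5],
              [  0.0, 525.0, 239.5],
              [  0.0,   0.0,   1.0]])

# Three synthetic 3D points in the camera frame, one per column (meters).
points = np.array([[0.0,  0.1, -0.2],
                   [0.0, -0.1,  0.1],
                   [1.0,  0.8,  1.5]])

proj = K.dot(points)                  # homogeneous pixel coordinates
proj = proj / proj[2, :]              # divide by depth, as in project()
pixels = np.round(proj[:2, :]).astype(np.int16)
print(pixels)                         # 2xN array of (u, v) = (column, row) coordinates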
def x(self, depth_image):
point_cloud = self.deproject(depth_image)
point_cloud_im_data = point_cloud.data.T.reshape(depth_image.height,
depth_image.width, 3)
return PointCloudImage(data=point_cloud_im_data, frame=self._frame)
|
deproject_to_image
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, depth_image):\n point_cloud = self.deproject(depth_image)\n point_cloud_im_data = point_cloud.data.T.reshape(depth_image.height,\n depth_image.width, 3)\n return PointCloudImage(data=point_cloud_im_data, frame=self._frame)\n",
"role": "user"
},
{
"content": "deproject_to_image",
"role": "assistant"
}
] |
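The `deproject_to_image` entry above relies on `deproject` to turn a depth image into a per-pixel point cloud, then reshapes the `(3, H*W)` data back to `H x W x 3`. A NumPy sketch of the underlying deprojection, X = z * K^{-1} [u, v, 1]^T, applied to a small synthetic depth map (K and the depths are illustrative):

import numpy as np

K = np.array([[525.0,   0.0, 319.5],
              [  0.0, 525.0, 239.5],
              [  0.0,   0.0,   1.0]])
H, W = 4, 6
depth = np.full((H, W), 0.8)                      # synthetic constant-depth image (meters)

# Pixel grid in homogeneous form: rows are u (column index), v (row index), 1.
v, u = np.indices((H, W))
pixels_h = np.stack([u.ravel(), v.ravel(), np.ones(H * W)], axis=0)

rays = np.linalg.inv(K).dot(pixels_h)             # unit-depth rays through each pixel
points = rays * depth.ravel()[np.newaxis, :]      # scale by per-pixel depth -> (3, H*W)

point_cloud_im = points.T.reshape(H, W, 3)        # same reshape as deproject_to_image
print(point_cloud_im.shape)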
def x(filename):
file_root, file_ext = os.path.splitext(filename)
if file_ext.lower() != INTR_EXTENSION:
raise ValueError(
'Extension %s not supported for CameraIntrinsics. Must be stored with extension %s'
% (file_ext, INTR_EXTENSION))
f = open(filename, 'r')
ci = json.load(f)
f.close()
return CameraIntrinsics(frame=ci['_frame'], fx=ci['_fx'], fy=ci['_fy'],
cx=ci['_cx'], cy=ci['_cy'], skew=ci['_skew'], height=ci['_height'],
width=ci['_width'])
|
load
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError(\n 'Extension %s not supported for CameraIntrinsics. Must be stored with extension %s'\n % (file_ext, INTR_EXTENSION))\n f = open(filename, 'r')\n ci = json.load(f)\n f.close()\n return CameraIntrinsics(frame=ci['_frame'], fx=ci['_fx'], fy=ci['_fy'],\n cx=ci['_cx'], cy=ci['_cy'], skew=ci['_skew'], height=ci['_height'],\n width=ci['_width'])\n",
"role": "user"
},
{
"content": "load",
"role": "assistant"
}
] |
def x(cfg):
if 'prestored_data' in cfg.keys() and cfg['prestored_data'] == 1:
sensor = VirtualKinect2Sensor(path_to_images=cfg[
'prestored_data_dir'], frame=cfg['sensor']['frame'])
else:
sensor = Kinect2Sensor(device_num=cfg['sensor']['device_num'],
frame=cfg['sensor']['frame'], packet_pipeline_mode=cfg['sensor'
]['pipeline_mode'])
sensor.start()
ir_intrinsics = sensor.ir_intrinsics
colors = []
depths = []
for _ in range(cfg['num_images']):
color, depth, _ = sensor.frames()
colors.append(color)
depths.append(depth)
sensor.stop()
return colors, depths, ir_intrinsics
|
load_images
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cfg):\n if 'prestored_data' in cfg.keys() and cfg['prestored_data'] == 1:\n sensor = VirtualKinect2Sensor(path_to_images=cfg[\n 'prestored_data_dir'], frame=cfg['sensor']['frame'])\n else:\n sensor = Kinect2Sensor(device_num=cfg['sensor']['device_num'],\n frame=cfg['sensor']['frame'], packet_pipeline_mode=cfg['sensor'\n ]['pipeline_mode'])\n sensor.start()\n ir_intrinsics = sensor.ir_intrinsics\n colors = []\n depths = []\n for _ in range(cfg['num_images']):\n color, depth, _ = sensor.frames()\n colors.append(color)\n depths.append(depth)\n sensor.stop()\n return colors, depths, ir_intrinsics\n",
"role": "user"
},
{
"content": "load_images",
"role": "assistant"
}
] |
def x(self):
if self._device is None:
raise RuntimeError(
'Kinect2 device %s not runnning. Cannot return color intrinsics')
camera_params = self._device.getColorCameraParams()
return CameraIntrinsics(self._color_frame, camera_params.fx,
camera_params.fy, camera_params.cx, camera_params.cy)
|
color_intrinsics
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._device is None:\n raise RuntimeError(\n 'Kinect2 device %s not runnning. Cannot return color intrinsics')\n camera_params = self._device.getColorCameraParams()\n return CameraIntrinsics(self._color_frame, camera_params.fx,\n camera_params.fy, camera_params.cx, camera_params.cy)\n",
"role": "user"
},
{
"content": "color_intrinsics",
"role": "assistant"
}
] |
def x(self):
if self._device is None:
raise RuntimeError(
'Kinect2 device %s not runnning. Cannot return IR intrinsics')
camera_params = self._device.getIrCameraParams()
return CameraIntrinsics(self._ir_frame, camera_params.fx, camera_params
.fy, camera_params.cx, camera_params.cy, height=Kinect2Sensor.
DEPTH_IM_HEIGHT, width=Kinect2Sensor.DEPTH_IM_WIDTH)
|
ir_intrinsics
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._device is None:\n raise RuntimeError(\n 'Kinect2 device %s not runnning. Cannot return IR intrinsics')\n camera_params = self._device.getIrCameraParams()\n return CameraIntrinsics(self._ir_frame, camera_params.fx, camera_params\n .fy, camera_params.cx, camera_params.cy, height=Kinect2Sensor.\n DEPTH_IM_HEIGHT, width=Kinect2Sensor.DEPTH_IM_WIDTH)\n",
"role": "user"
},
{
"content": "ir_intrinsics",
"role": "assistant"
}
] |
def x(self):
if self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL:
self._pipeline = lf2.OpenGLPacketPipeline()
elif self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU:
self._pipeline = lf2.CpuPacketPipeline()
self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)
lf2.setGlobalLogger(self._logger)
self._fn_handle = lf2.Freenect2()
self._num_devices = self._fn_handle.enumerateDevices()
if self._num_devices == 0:
raise IOError('Failed to start stream. No Kinect2 devices available!')
if self._num_devices <= self._device_num:
raise IOError('Failed to start stream. Device num %d unavailable!' %
self._device_num)
self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)
self._device = self._fn_handle.openDevice(self._serial, pipeline=self.
_pipeline)
self._listener = lf2.SyncMultiFrameListener(lf2.FrameType.Color | lf2.
FrameType.Ir | lf2.FrameType.Depth)
self._device.setColorFrameListener(self._listener)
self._device.setIrAndDepthFrameListener(self._listener)
self._device.start()
self._registration = None
if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
logging.debug('Using color to depth registration')
self._registration = lf2.Registration(self._device.
getIrCameraParams(), self._device.getColorCameraParams())
self._running = True
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL:\n self._pipeline = lf2.OpenGLPacketPipeline()\n elif self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU:\n self._pipeline = lf2.CpuPacketPipeline()\n self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)\n lf2.setGlobalLogger(self._logger)\n self._fn_handle = lf2.Freenect2()\n self._num_devices = self._fn_handle.enumerateDevices()\n if self._num_devices == 0:\n raise IOError('Failed to start stream. No Kinect2 devices available!')\n if self._num_devices <= self._device_num:\n raise IOError('Failed to start stream. Device num %d unavailable!' %\n self._device_num)\n self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)\n self._device = self._fn_handle.openDevice(self._serial, pipeline=self.\n _pipeline)\n self._listener = lf2.SyncMultiFrameListener(lf2.FrameType.Color | lf2.\n FrameType.Ir | lf2.FrameType.Depth)\n self._device.setColorFrameListener(self._listener)\n self._device.setIrAndDepthFrameListener(self._listener)\n self._device.start()\n self._registration = None\n if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:\n logging.debug('Using color to depth registration')\n self._registration = lf2.Registration(self._device.\n getIrCameraParams(), self._device.getColorCameraParams())\n self._running = True\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self):
if not self._running or self._device is None:
logging.warning('Kinect2 device %d not runnning. Aborting stop' %
self._device_num)
return False
self._device.stop()
self._device.close()
self._device = None
self._running = False
return True
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self._running or self._device is None:\n logging.warning('Kinect2 device %d not runnning. Aborting stop' %\n self._device_num)\n return False\n self._device.stop()\n self._device.close()\n self._device = None\n self._running = False\n return True\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(self, skip_registration=False):
color_im, depth_im, ir_im, _ = self._frames_and_index_map(skip_registration
=skip_registration)
return color_im, depth_im, ir_im
|
frames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, skip_registration=False):\n color_im, depth_im, ir_im, _ = self._frames_and_index_map(skip_registration\n =skip_registration)\n return color_im, depth_im, ir_im\n",
"role": "user"
},
{
"content": "frames",
"role": "assistant"
}
] |
def x(self, skip_registration=False):
if not self._running:
raise RuntimeError(
'Kinect2 device %s not runnning. Cannot read frames' % self.
_device_num)
frames = self._listener.waitForNewFrame()
unregistered_color = frames['color']
distorted_depth = frames['depth']
ir = frames['ir']
color_frame = self._color_frame
color = unregistered_color
depth = distorted_depth
color_depth_map = np.zeros([depth.height, depth.width]).astype(np.int32
).ravel()
if (not skip_registration and self._registration_mode ==
Kinect2RegistrationMode.COLOR_TO_DEPTH):
color_frame = self._ir_frame
depth = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Depth)
color = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Color)
self._registration.apply(unregistered_color, distorted_depth, depth,
color, color_depth_map=color_depth_map)
color_arr = copy.copy(color.asarray())
color_arr[:, :, [0, 2]] = color_arr[:, :, [2, 0]]
color_arr[:, :, 0] = np.fliplr(color_arr[:, :, 0])
color_arr[:, :, 1] = np.fliplr(color_arr[:, :, 1])
color_arr[:, :, 2] = np.fliplr(color_arr[:, :, 2])
color_arr[:, :, 3] = np.fliplr(color_arr[:, :, 3])
depth_arr = np.fliplr(copy.copy(depth.asarray()))
ir_arr = np.fliplr(copy.copy(ir.asarray()))
if self._depth_mode == Kinect2DepthMode.METERS:
depth_arr = depth_arr * MM_TO_METERS
self._listener.release(frames)
return ColorImage(color_arr[:, :, :3], color_frame), DepthImage(depth_arr,
self._ir_frame), IrImage(ir_arr.astype(np.uint16), self._ir_frame
), color_depth_map
|
frames_and_index_map
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, skip_registration=False):\n if not self._running:\n raise RuntimeError(\n 'Kinect2 device %s not runnning. Cannot read frames' % self.\n _device_num)\n frames = self._listener.waitForNewFrame()\n unregistered_color = frames['color']\n distorted_depth = frames['depth']\n ir = frames['ir']\n color_frame = self._color_frame\n color = unregistered_color\n depth = distorted_depth\n color_depth_map = np.zeros([depth.height, depth.width]).astype(np.int32\n ).ravel()\n if (not skip_registration and self._registration_mode ==\n Kinect2RegistrationMode.COLOR_TO_DEPTH):\n color_frame = self._ir_frame\n depth = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Depth)\n color = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Color)\n self._registration.apply(unregistered_color, distorted_depth, depth,\n color, color_depth_map=color_depth_map)\n color_arr = copy.copy(color.asarray())\n color_arr[:, :, [0, 2]] = color_arr[:, :, [2, 0]]\n color_arr[:, :, 0] = np.fliplr(color_arr[:, :, 0])\n color_arr[:, :, 1] = np.fliplr(color_arr[:, :, 1])\n color_arr[:, :, 2] = np.fliplr(color_arr[:, :, 2])\n color_arr[:, :, 3] = np.fliplr(color_arr[:, :, 3])\n depth_arr = np.fliplr(copy.copy(depth.asarray()))\n ir_arr = np.fliplr(copy.copy(ir.asarray()))\n if self._depth_mode == Kinect2DepthMode.METERS:\n depth_arr = depth_arr * MM_TO_METERS\n self._listener.release(frames)\n return ColorImage(color_arr[:, :, :3], color_frame), DepthImage(depth_arr,\n self._ir_frame), IrImage(ir_arr.astype(np.uint16), self._ir_frame\n ), color_depth_map\n",
"role": "user"
},
{
"content": "frames_and_index_map",
"role": "assistant"
}
] |
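The Kinect2 `frames_and_index_map` entry above converts the BGRX color buffer to RGB by swapping channels 0 and 2 and then mirroring every channel with `np.fliplr`. A one-step NumPy sketch of the equivalent conversion on a synthetic buffer (pixel values are random placeholders):

import numpy as np

# Synthetic 4-channel BGRX frame, standing in for libfreenect2's color output.
H, W = 3, 5
bgrx = np.random.randint(0, 256, size=(H, W, 4)).astype(np.uint8)

# Reverse the first three channels (B,G,R -> R,G,B) and mirror horizontally;
# equivalent to the per-channel index swap and fliplr calls in the entry above.
rgb = np.fliplr(bgrx[:, :, 2::-1])
print(rgb.shape)  # (H, W, 3)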
def x(self, msg):
encoding = msg.encoding
try:
image = self._bridge.imgmsg_to_cv2(msg, encoding)
except CvBridgeError as e:
rospy.logerr(e)
return image
|
process_image_msg
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, msg):\n encoding = msg.encoding\n try:\n image = self._bridge.imgmsg_to_cv2(msg, encoding)\n except CvBridgeError as e:\n rospy.logerr(e)\n return image\n",
"role": "user"
},
{
"content": "process_image_msg",
"role": "assistant"
}
] |
def x(self, image_msg):
color_arr = self._process_image_msg(image_msg)
self._cur_color_im = ColorImage(color_arr[:, :, ::-1], self._frame)
|
color_image_callback
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, image_msg):\n color_arr = self._process_image_msg(image_msg)\n self._cur_color_im = ColorImage(color_arr[:, :, ::-1], self._frame)\n",
"role": "user"
},
{
"content": "color_image_callback",
"role": "assistant"
}
] |
def x(self, image_msg):
    encoding = image_msg.encoding
    try:
        depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)
    except CvBridgeError as e:
        rospy.logerr(e)
    depth = np.array(depth_arr * MM_TO_METERS, np.float32)
    self._cur_depth_im = DepthImage(depth, self._frame)
|
depth_image_callback
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, image_msg):\n encoding = image_msg.encoding\n try:\n depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)\n import pdb\n pdb.set_trace()\n except CvBridgeError as e:\n rospy.logerr(e)\n depth = np.array(depth_arr * MM_TO_METERS, np.float32)\n self._cur_depth_im = DepthImage(depth, self._frame)\n",
"role": "user"
},
{
"content": "depth_image_callback",
"role": "assistant"
}
] |
def x(self):
self._image_sub = rospy.Subscriber(self.topic_image_color, sensor_msgs.
msg.Image, self._color_image_callback)
self._depth_sub = rospy.Subscriber(self.topic_image_depth, sensor_msgs.
msg.Image, self._depth_image_callback)
self._camera_info_sub = rospy.Subscriber(self.topic_info_camera,
sensor_msgs.msg.CameraInfo, self._camera_info_callback)
timeout = 10
try:
rospy.loginfo('waiting to recieve a message from the Kinect')
rospy.wait_for_message(self.topic_image_color, sensor_msgs.msg.
Image, timeout=timeout)
rospy.wait_for_message(self.topic_image_depth, sensor_msgs.msg.
Image, timeout=timeout)
rospy.wait_for_message(self.topic_info_camera, sensor_msgs.msg.
CameraInfo, timeout=timeout)
except rospy.ROSException as e:
print('KINECT NOT FOUND')
rospy.logerr('Kinect topic not found, Kinect not started')
rospy.logerr(e)
while self._camera_intr is None:
time.sleep(0.1)
self._running = True
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self._image_sub = rospy.Subscriber(self.topic_image_color, sensor_msgs.\n msg.Image, self._color_image_callback)\n self._depth_sub = rospy.Subscriber(self.topic_image_depth, sensor_msgs.\n msg.Image, self._depth_image_callback)\n self._camera_info_sub = rospy.Subscriber(self.topic_info_camera,\n sensor_msgs.msg.CameraInfo, self._camera_info_callback)\n timeout = 10\n try:\n rospy.loginfo('waiting to recieve a message from the Kinect')\n rospy.wait_for_message(self.topic_image_color, sensor_msgs.msg.\n Image, timeout=timeout)\n rospy.wait_for_message(self.topic_image_depth, sensor_msgs.msg.\n Image, timeout=timeout)\n rospy.wait_for_message(self.topic_info_camera, sensor_msgs.msg.\n CameraInfo, timeout=timeout)\n except rospy.ROSException as e:\n print('KINECT NOT FOUND')\n rospy.logerr('Kinect topic not found, Kinect not started')\n rospy.logerr(e)\n while self._camera_intr is None:\n time.sleep(0.1)\n self._running = True\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |