code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if hasattr(request, 'user') and hasattr(request.user, 'id') and request.user.id:
monitoring.set_custom_metric('request_user_id', request.user.id) | def _set_request_user_id_metric(self, request) | Add request_user_id metric
Metrics:
request_user_id | 2.724968 | 2.761992 | 0.986595 |
if 'HTTP_REFERER' in request.META and request.META['HTTP_REFERER']:
monitoring.set_custom_metric('request_referer', request.META['HTTP_REFERER']) | def _set_request_referer_metric(self, request) | Add metric 'request_referer' for http referer. | 2.96434 | 2.483566 | 1.193582 |
if 'HTTP_USER_AGENT' in request.META and request.META['HTTP_USER_AGENT']:
user_agent = request.META['HTTP_USER_AGENT']
monitoring.set_custom_metric('request_user_agent', user_agent)
if user_agent:
# Example agent string from edx-rest-api-client:
# python-requests/2.9.1 edx-rest-api-client/1.7.2 ecommerce
# See https://github.com/edx/edx-rest-api-client/commit/692903c30b157f7a4edabc2f53aae1742db3a019
user_agent_parts = user_agent.split()
if len(user_agent_parts) == 3 and user_agent_parts[1].startswith('edx-rest-api-client/'):
monitoring.set_custom_metric('request_client_name', user_agent_parts[2]) | def _set_request_user_agent_metrics(self, request) | Add metrics for user agent for python.
Metrics:
request_user_agent
request_client_name: The client name from edx-rest-api-client calls. | 3.729052 | 3.468364 | 1.075162 |
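A minimal standalone sketch of the client-name parsing above; the agent string is the example from the code comment and the variable names are illustrative, not part of the middleware:

```python
# Example agent string from the code comment above.
user_agent = "python-requests/2.9.1 edx-rest-api-client/1.7.2 ecommerce"

user_agent_parts = user_agent.split()
if len(user_agent_parts) == 3 and user_agent_parts[1].startswith("edx-rest-api-client/"):
    request_client_name = user_agent_parts[2]
    print(request_client_name)  # -> ecommerce
```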
if 'HTTP_AUTHORIZATION' in request.META and request.META['HTTP_AUTHORIZATION']:
token_parts = request.META['HTTP_AUTHORIZATION'].split()
# Example: "JWT eyJhbGciO..."
if len(token_parts) == 2:
auth_type = token_parts[0].lower() # 'jwt' or 'bearer' (for example)
else:
auth_type = 'other-token-type'
elif not hasattr(request, 'user') or not request.user:
auth_type = 'no-user'
elif not request.user.is_authenticated:
auth_type = 'unauthenticated'
else:
auth_type = 'session-or-unknown'
monitoring.set_custom_metric('request_auth_type', auth_type) | def _set_request_auth_type_metric(self, request) | Add metric 'request_auth_type' for the authentication type used.
NOTE: This is a best guess at this point. Possible values include:
no-user
unauthenticated
jwt/bearer/other-token-type
session-or-unknown (catch all) | 2.939508 | 2.319499 | 1.267303 |
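A standalone sketch of the token-type detection above; the header value is the example from the code comment and everything else is illustrative:

```python
# Example Authorization header value from the code comment above.
http_authorization = "JWT eyJhbGciO..."

token_parts = http_authorization.split()
if len(token_parts) == 2:
    auth_type = token_parts[0].lower()  # 'jwt' or 'bearer', for example
else:
    auth_type = 'other-token-type'
print(auth_type)  # -> jwt
```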
return '{}\r\n'.format(json.dumps({
'id': identifier,
'method': method,
'params': params or {},
'jsonrpc': '2.0'
})).encode() | def jsonrpc_request(method, identifier, params=None) | Produce a JSONRPC request. | 2.847015 | 2.714796 | 1.048703 |
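A self-contained sketch of the framing produced by `jsonrpc_request`; the method name is illustrative, and the printed bytes assume the insertion-ordered dicts of Python 3.7+:

```python
import json

def jsonrpc_request(method, identifier, params=None):
    # Serialize a JSON-RPC 2.0 request and terminate it with CRLF.
    return '{}\r\n'.format(json.dumps({
        'id': identifier,
        'method': method,
        'params': params or {},
        'jsonrpc': '2.0'
    })).encode()

print(jsonrpc_request('Server.GetStatus', 1))
# b'{"id": 1, "method": "Server.GetStatus", "params": {}, "jsonrpc": "2.0"}\r\n'
```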
self._data_buffer += data.decode()
if not self._data_buffer.endswith('\r\n'):
return
data = self._data_buffer
self._data_buffer = '' # clear buffer
for cmd in data.strip().split('\r\n'):
data = json.loads(cmd)
if not isinstance(data, list):
data = [data]
for item in data:
self.handle_data(item) | def data_received(self, data) | Handle received data. | 2.657382 | 2.490379 | 1.06706 |
if 'id' in data:
self.handle_response(data)
else:
self.handle_notification(data) | def handle_data(self, data) | Handle JSONRPC data. | 4.186727 | 2.655356 | 1.57671 |
identifier = data.get('id')
self._buffer[identifier]['data'] = data.get('result')
self._buffer[identifier]['flag'].set() | def handle_response(self, data) | Handle JSONRPC response. | 7.179883 | 5.954813 | 1.205728 |
if data.get('method') in self._callbacks:
self._callbacks.get(data.get('method'))(data.get('params')) | def handle_notification(self, data) | Handle JSONRPC notification. | 3.709074 | 2.942723 | 1.260422 |
identifier = random.randint(1, 1000)
self._transport.write(jsonrpc_request(method, identifier, params))
self._buffer[identifier] = {'flag': asyncio.Event()}
yield from self._buffer[identifier]['flag'].wait()
result = self._buffer[identifier]['data']
del self._buffer[identifier]['data']
return result | def request(self, method, params) | Send a JSONRPC request. | 4.032195 | 3.793229 | 1.062998 |
yield from self._do_connect()
_LOGGER.info('connected to snapserver on %s:%s', self._host, self._port)
status = yield from self.status()
self.synchronize(status)
self._on_server_connect() | def start(self) | Initiate server connection. | 7.908879 | 6.946795 | 1.138493 |
_, self._protocol = yield from self._loop.create_connection(
lambda: SnapcastProtocol(self._callbacks), self._host, self._port) | def _do_connect(self) | Perform the connection to the server. | 7.08249 | 5.827132 | 1.215433 |
@asyncio.coroutine
def try_reconnect():
try:
yield from self._do_connect()
except IOError:
self._loop.call_later(SERVER_RECONNECT_DELAY,
self._reconnect_cb)
asyncio.ensure_future(try_reconnect()) | def _reconnect_cb(self) | Callback to reconnect to the server. | 4.326881 | 4.081186 | 1.060202 |
result = yield from self._protocol.request(method, params)
return result | def _transact(self, method, params=None) | Wrap requests. | 8.123904 | 9.435538 | 0.86099 |
params = {'id': identifier}
response = yield from self._transact(SERVER_DELETECLIENT, params)
self.synchronize(response) | def delete_client(self, identifier) | Delete client. | 13.334774 | 11.632003 | 1.146387 |
self._version = status.get('server').get('version')
self._groups = {}
self._clients = {}
self._streams = {}
for stream in status.get('server').get('streams'):
self._streams[stream.get('id')] = Snapstream(stream)
_LOGGER.debug('stream found: %s', self._streams[stream.get('id')])
for group in status.get('server').get('groups'):
self._groups[group.get('id')] = Snapgroup(self, group)
_LOGGER.debug('group found: %s', self._groups[group.get('id')])
for client in group.get('clients'):
self._clients[client.get('id')] = Snapclient(self, client)
_LOGGER.debug('client found: %s', self._clients[client.get('id')]) | def synchronize(self, status) | Synchronize snapserver. | 2.024698 | 1.856301 | 1.090717 |
params = {'id': identifier}
if key is not None and value is not None:
params[key] = value
result = yield from self._transact(method, params)
return result.get(key) | def _request(self, method, identifier, key=None, value=None) | Perform request with identifier. | 3.605098 | 3.539276 | 1.018598 |
self._protocol = None
if self._on_disconnect_callback_func and callable(self._on_disconnect_callback_func):
self._on_disconnect_callback_func(exception)
if self._reconnect:
self._reconnect_cb() | def _on_server_disconnect(self, exception) | Handle server disconnection. | 3.644516 | 3.327163 | 1.095382 |
self._groups.get(data.get('id')).update_mute(data) | def _on_group_mute(self, data) | Handle group mute. | 10.059541 | 7.84285 | 1.282638 |
self._groups.get(data.get('id')).update_stream(data) | def _on_group_stream_changed(self, data) | Handle group stream change. | 9.886129 | 7.340982 | 1.346704 |
client = None
if data.get('id') in self._clients:
client = self._clients[data.get('id')]
client.update_connected(True)
else:
client = Snapclient(self, data.get('client'))
self._clients[data.get('id')] = client
if self._new_client_callback_func and callable(self._new_client_callback_func):
self._new_client_callback_func(client)
_LOGGER.info('client %s connected', client.friendly_name) | def _on_client_connect(self, data) | Handle client connect. | 2.920094 | 2.860382 | 1.020876 |
self._clients[data.get('id')].update_connected(False)
_LOGGER.info('client %s disconnected', self._clients[data.get('id')].friendly_name) | def _on_client_disconnect(self, data) | Handle client disconnect. | 4.63665 | 4.208727 | 1.101675 |
self._clients.get(data.get('id')).update_volume(data) | def _on_client_volume_changed(self, data) | Handle client volume change. | 9.520772 | 7.088476 | 1.343134 |
self._clients.get(data.get('id')).update_name(data) | def _on_client_name_changed(self, data) | Handle client name changed. | 9.786958 | 7.041457 | 1.389905 |
self._clients.get(data.get('id')).update_latency(data) | def _on_client_latency_changed(self, data) | Handle client latency changed. | 10.059596 | 7.294038 | 1.379153 |
self._streams[data.get('id')].update(data.get('stream'))
_LOGGER.info('stream %s updated', self._streams[data.get('id')].friendly_name)
for group in self._groups.values():
if group.stream == data.get('id'):
group.callback() | def _on_stream_update(self, data) | Handle stream update. | 3.754819 | 3.457822 | 1.085891 |
from uuid import getnode as get_mac
return ':'.join(("%012x" % get_mac())[i:i+2] for i in range(0, 12, 2)) | def mac() | Get MAC. | 2.018972 | 1.926155 | 1.048187 |
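A sketch of how the MAC formatting above works: `getnode()` returns the hardware address as a 48-bit integer, which is rendered as 12 hex digits and split into colon-separated pairs (the printed address is illustrative):

```python
from uuid import getnode

raw = "%012x" % getnode()            # e.g. 'a45e60c21b9f'
mac = ':'.join(raw[i:i + 2] for i in range(0, 12, 2))
print(mac)                           # e.g. 'a4:5e:60:c2:1b:9f'
```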
self._queue.put(hello_packet(socket.gethostname(), mac(), __version__))
self._queue.put(request_packet(MSG_SERVER_SETTINGS))
self._queue.put(request_packet(MSG_SAMPLE_FORMAT))
self._queue.put(request_packet(MSG_HEADER)) | def register(self) | Transact with server. | 6.691668 | 6.317962 | 1.05915 |
self._queue.put(command_packet(CMD_START_STREAM))
_LOGGER.info('Requesting stream')
self._source.run() | def request_start(self) | Indicate readiness to receive stream.
This is a blocking call. | 12.410147 | 11.71529 | 1.059312 |
while True:
base_bytes = self._socket.recv(BASE_SIZE)
base = basemessage.parse(base_bytes)
payload_bytes = self._socket.recv(base.payload_length)
self._handle_message(packet.parse(base_bytes + payload_bytes)) | def _read_socket(self) | Process incoming messages from socket. | 4.804099 | 4.452612 | 1.07894 |
if data.type == MSG_SERVER_SETTINGS:
_LOGGER.info(data.payload)
elif data.type == MSG_SAMPLE_FORMAT:
_LOGGER.info(data.payload)
self._connected = True
elif data.type == MSG_TIME:
if not self._buffered:
_LOGGER.info('Buffering')
elif data.type == MSG_HEADER:
# Push to app source and start playing.
_LOGGER.info(data.payload.codec.decode('ascii'))
self._source.push(data.payload.header)
self._source.play()
elif data.type == MSG_WIRE_CHUNK:
# Add chunks to play queue.
self._buffer.put(data.payload.chunk)
if self._buffer.qsize() > BUFFER_SIZE:
self._buffered = True
if self._buffer.empty():
self._buffered = False | def _handle_message(self, data) | Handle messages. | 4.595772 | 4.437604 | 1.035643 |
while True:
now = time.time()
if self._connected and (self._last_sync + SYNC_AFTER) < now:
self._queue.put(request_packet(MSG_TIME))
self._last_sync = now
if not self._queue.empty():
self._socket.send(self._queue.get()) | def _write_socket(self) | Pass messages from queue to socket. | 4.206261 | 3.789019 | 1.110119 |
while True:
if self._buffered:
self._source.push(self._buffer.get()) | def _play(self) | Relay buffer to app source. | 11.26153 | 7.372365 | 1.527533 |
self._group['stream_id'] = stream_id
yield from self._server.group_stream(self.identifier, stream_id)
_LOGGER.info('set stream to %s on %s', stream_id, self.friendly_name) | def set_stream(self, stream_id) | Set group stream. | 5.29949 | 4.607127 | 1.150281 |
self._group['muted'] = status
yield from self._server.group_mute(self.identifier, status)
_LOGGER.info('set muted to %s on %s', status, self.friendly_name) | def set_muted(self, status) | Set group mute status. | 5.827887 | 5.223074 | 1.115796 |
volume_sum = 0
for client in self._group.get('clients'):
volume_sum += self._server.client(client.get('id')).volume
return int(volume_sum / len(self._group.get('clients'))) | def volume(self) | Get volume. | 4.939785 | 4.816939 | 1.025503 |
for data in self._group.get('clients'):
client = self._server.client(data.get('id'))
yield from client.set_volume(volume, update_group=False)
client.update_volume({
'volume': {
'percent': volume,
'muted': client.muted
}
})
_LOGGER.info('set volume to %s on clients in %s', volume, self.friendly_name) | def set_volume(self, volume) | Set volume. | 5.634545 | 5.586114 | 1.00867 |
if client_identifier in self.clients:
_LOGGER.error('%s already in group %s', client_identifier, self.identifier)
return
new_clients = self.clients
new_clients.append(client_identifier)
yield from self._server.group_clients(self.identifier, new_clients)
_LOGGER.info('added %s to %s', client_identifier, self.identifier)
self._server.client(client_identifier).callback()
self.callback() | def add_client(self, client_identifier) | Add a client. | 3.326516 | 3.276046 | 1.015406 |
new_clients = self.clients
new_clients.remove(client_identifier)
yield from self._server.group_clients(self.identifier, new_clients)
_LOGGER.info('removed %s from %s', client_identifier, self.identifier)
self._server.client(client_identifier).callback()
self.callback() | def remove_client(self, client_identifier) | Remove a client. | 4.717497 | 4.550023 | 1.036807 |
self._group['muted'] = data['mute']
self.callback()
_LOGGER.info('updated mute on %s', self.friendly_name) | def update_mute(self, data) | Update mute. | 8.547952 | 7.917503 | 1.079627 |
self._group['stream_id'] = data['stream_id']
self.callback()
_LOGGER.info('updated stream to %s on %s', self.stream, self.friendly_name) | def update_stream(self, data) | Update stream. | 7.294697 | 6.921171 | 1.053969 |
self._snapshot = {
'muted': self.muted,
'volume': self.volume,
'stream': self.stream
}
_LOGGER.info('took snapshot of current state of %s', self.friendly_name) | def snapshot(self) | Snapshot current state. | 5.425134 | 4.799804 | 1.130282 |
if not self._snapshot:
return
yield from self.set_muted(self._snapshot['muted'])
yield from self.set_volume(self._snapshot['volume'])
yield from self.set_stream(self._snapshot['stream'])
self.callback()
_LOGGER.info('restored snapshot of state of %s', self.friendly_name) | def restore(self) | Restore snapshotted state. | 4.281245 | 3.993912 | 1.071943 |
if self._callback_func and callable(self._callback_func):
self._callback_func(self) | def callback(self) | Run callback. | 4.347226 | 3.586229 | 1.2122 |
return packet.build(
Container(
type=message_type,
id=1,
refer=0,
sent=Container(
secs=0,
usecs=0
),
recv=Container(
secs=0,
usecs=0
),
payload_length=payload_length,
payload=payload
)
) | def message(message_type, payload, payload_length) | Build a message. | 3.804777 | 3.858828 | 0.985993 |
as_list = []
length = 2
for field, value in data.items():
as_list.append(Container(field=bytes(field, ENCODING),
value=bytes(value, ENCODING)))
length += len(field) + len(value) + 4
return (Container(
num=len(as_list),
map=as_list
), length) | def map_helper(data) | Build a map message. | 4.513048 | 4.378933 | 1.030627 |
return message('Command',
Container(string_length=len(cmd),
string=bytes(cmd, ENCODING)),
len(cmd) + 2) | def command_packet(cmd) | Build a command message. | 12.963611 | 11.191563 | 1.158338 |
for group in self._server.groups:
if self.identifier in group.clients:
return group | def group(self) | Get group. | 10.63421 | 8.588569 | 1.238182 |
if len(self._client.get('config').get('name')):
return self._client.get('config').get('name')
return self._client.get('host').get('name') | def friendly_name(self) | Get friendly name. | 4.338886 | 3.786195 | 1.145975 |
if not name:
name = ''
self._client['config']['name'] = name
yield from self._server.client_name(self.identifier, name) | def set_name(self, name) | Set a client name. | 9.179059 | 6.934569 | 1.323667 |
self._client['config']['latency'] = latency
yield from self._server.client_latency(self.identifier, latency) | def set_latency(self, latency) | Set client latency. | 13.941478 | 10.025522 | 1.390599 |
new_volume = self._client['config']['volume']
new_volume['muted'] = status
self._client['config']['volume']['muted'] = status
yield from self._server.client_volume(self.identifier, new_volume)
_LOGGER.info('set muted to %s on %s', status, self.friendly_name) | def set_muted(self, status) | Set client mute status. | 4.713497 | 4.289181 | 1.098927 |
if percent not in range(0, 101):
raise ValueError('Volume percent out of range')
new_volume = self._client['config']['volume']
new_volume['percent'] = percent
self._client['config']['volume']['percent'] = percent
yield from self._server.client_volume(self.identifier, new_volume)
if update_group:
self._server.group(self.group.identifier).callback()
_LOGGER.info('set volume to %s on %s', percent, self.friendly_name) | def set_volume(self, percent, update_group=True) | Set client volume percent. | 3.915342 | 3.582322 | 1.092962 |
self._client['config']['volume'] = data['volume']
_LOGGER.info('updated volume on %s', self.friendly_name)
self._server.group(self.group.identifier).callback()
self.callback() | def update_volume(self, data) | Update volume. | 10.235662 | 9.799423 | 1.044517 |
self._client['config']['name'] = data['name']
_LOGGER.info('updated name on %s', self.friendly_name)
self.callback() | def update_name(self, data) | Update name. | 8.315992 | 7.561436 | 1.09979 |
self._client['config']['latency'] = data['latency']
_LOGGER.info('updated latency on %s', self.friendly_name)
self.callback() | def update_latency(self, data) | Update latency. | 8.854612 | 8.031633 | 1.102467 |
self._client['connected'] = status
_LOGGER.info('updated connected status to %s on %s', status, self.friendly_name)
self.callback() | def update_connected(self, status) | Update connected. | 7.066227 | 6.283016 | 1.124655 |
self._snapshot = {
'name': self.name,
'volume': self.volume,
'muted': self.muted,
'latency': self.latency
}
_LOGGER.info('took snapshot of current state of %s', self.friendly_name) | def snapshot(self) | Snapshot current state. | 5.175688 | 4.683719 | 1.105038 |
if not self._snapshot:
return
yield from self.set_name(self._snapshot['name'])
yield from self.set_volume(self._snapshot['volume'])
yield from self.set_muted(self._snapshot['muted'])
yield from self.set_latency(self._snapshot['latency'])
self.callback()
_LOGGER.info('restored snapshot of state of %s', self.friendly_name) | def restore(self) | Restore snapshotted state. | 4.008896 | 3.733715 | 1.073702 |
self._src.emit('push-buffer', Gst.Buffer.new_wrapped(buf)) | def push(self, buf) | Push a buffer into the source. | 13.443869 | 9.630778 | 1.395928 |
server = Snapserver(loop, host, port, reconnect)
yield from server.start()
return server | def create_server(loop, host, port=CONTROL_PORT, reconnect=False) | Server factory. | 7.631492 | 7.305881 | 1.044568 |
def fmt(field, direction):
return '{0}{1}'.format({-1: '-', 1: '+'}[direction], field)
if '$orderby' in son:
return ', '.join(fmt(f, d) for f, d in son['$orderby'].items()) | def _get_ordering(son) | Helper function to extract formatted ordering from dict. | 4.744548 | 4.634745 | 1.023691 |
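A standalone sketch of the ordering helper above applied to a hypothetical `$orderby` document:

```python
son = {'$orderby': {'name': 1, 'age': -1}}   # hypothetical query document

def fmt(field, direction):
    return '{0}{1}'.format({-1: '-', 1: '+'}[direction], field)

if '$orderby' in son:
    print(', '.join(fmt(f, d) for f, d in son['$orderby'].items()))
    # -> +name, -age
```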
django_path = os.path.realpath(os.path.dirname(django.__file__))
django_path = os.path.normpath(os.path.join(django_path, '..'))
socketserver_path = os.path.realpath(os.path.dirname(SocketServer.__file__))
pymongo_path = os.path.realpath(os.path.dirname(pymongo.__file__))
trace = []
for frame, path, line_no, func_name, text in (f[:5] for f in stack):
s_path = os.path.realpath(path)
# Support hiding of frames -- used in various utilities that provide
# inspection.
if '__traceback_hide__' in frame.f_locals:
continue
if getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}).get('HIDE_DJANGO_SQL', True) \
and django_path in s_path and not 'django/contrib' in s_path:
continue
if socketserver_path in s_path:
continue
if pymongo_path in s_path:
continue
if not text:
text = ''
else:
text = (''.join(text)).strip()
trace.append((path, line_no, func_name, text))
return trace | def _tidy_stacktrace(stack) | Clean up stacktrace and remove all entries that:
1. Are part of Django (except contrib apps)
2. Are part of SocketServer (used by Django's dev server)
3. Are the last entry (which is part of our stacktracing code)
``stack`` should be a list of frame tuples from ``inspect.stack()`` | 3.071182 | 2.91326 | 1.054208 |
if iterable_or_scalar is None:
return ()
elif isinstance(iterable_or_scalar, string_types):
return (iterable_or_scalar,)
elif hasattr(iterable_or_scalar, "__iter__"):
return iterable_or_scalar
else:
return (iterable_or_scalar,) | def as_iterable(iterable_or_scalar) | Utility for converting an object to an iterable.
Parameters
----------
iterable_or_scalar : anything
Returns
-------
l : iterable
If `obj` was None, return the empty tuple.
If `obj` was not iterable returns a 1-tuple containing `obj`.
Otherwise return `obj`
Notes
-----
Although both string types and dictionaries are iterable in Python, we are treating them as not iterable in this
method. Thus, as_iterable(dict()) returns (dict, ) and as_iterable(string) returns (string, )
Examples
---------
>>> as_iterable(1)
(1,)
>>> as_iterable([1, 2, 3])
[1, 2, 3]
>>> as_iterable("my string")
("my string", )
>>> as_iterable({'a': 1})
({'a': 1}, ) | 1.856788 | 1.73614 | 1.069492 |
return self.jvm.org.apache.spark.util.Utils.getContextOrSparkClassLoader() | def classloader(self) | Returns the private class loader that spark uses.
This is needed since jars added with --jars are not easily resolvable by py4j's classloader | 18.787186 | 9.636939 | 1.949497 |
if package_name is not None:
jcontainer = self.import_scala_package_object(package_name)
elif object_name is not None:
jcontainer = self.import_scala_object(object_name)
elif java_class_instance is not None:
jcontainer = java_class_instance
else:
raise RuntimeError("Expected one of package_name, object_name or java_class_instance")
return jcontainer | def get_java_container(self, package_name=None, object_name=None, java_class_instance=None) | Convenience method to get the container that houses methods we wish to call a method on. | 2.097247 | 2.021064 | 1.037694 |
def _(*cols):
jcontainer = self.get_java_container(package_name=package_name, object_name=object_name, java_class_instance=java_class_instance)
# Ensure that your argument is a column
col_args = [col._jc if isinstance(col, Column) else _make_col(col)._jc for col in cols]
function = getattr(jcontainer, name)
args = col_args
jc = function(*args)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _ | def wrap_function_cols(self, name, package_name=None, object_name=None, java_class_instance=None, doc="") | Utility method for wrapping a scala/java function that returns a spark sql Column.
This assumes that the function that you are wrapping takes a list of spark sql Column objects as its arguments. | 4.101581 | 3.960448 | 1.035636 |
def _(*cols):
jcontainer = self.get_java_container(package_name=package_name, object_name=object_name, java_class_instance=java_class_instance)
# Ensure that your argument is a column
function = getattr(jcontainer, name)
judf = function()
jc = judf.apply(self.to_scala_seq([_to_java_column(c) for c in cols]))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _ | def wrap_spark_sql_udf(self, name, package_name=None, object_name=None, java_class_instance=None, doc="") | Wraps a scala/java spark user defined function | 4.59857 | 4.786036 | 0.960831 |
target_dir = join(dirname(__file__), 'spylon', 'spark')
with open(join(target_dir, "spark_properties_{}.json".format(version)), 'w') as fp:
all_props = _fetch_documentation(version=version, base_url=base_url)
all_props = sorted(all_props, key=lambda x: x[0])
all_props_d = [{"property": p, "default": d, "description": desc} for p, d, desc in all_props]
json.dump(all_props_d, fp, indent=2) | def _save_documentation(version, base_url="https://spark.apache.org/docs") | Write the spark property documentation to a file | 3.224264 | 2.959278 | 1.089544 |
seconds = td.total_seconds()
sign_string = '-' if seconds < 0 else ''
seconds = abs(int(seconds))
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
d = dict(sign=sign_string, days=days, hours=hours, minutes=minutes, seconds=seconds)
if days > 0:
return '{sign}{days}d{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d)
elif hours > 0:
return '{sign}{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d)
elif minutes > 0:
return '{sign}{minutes:02d}m:{seconds:02d}s'.format(**d)
else:
return '{sign}{seconds:02d}s'.format(**d) | def _pretty_time_delta(td) | Creates a string representation of a time delta.
Parameters
----------
td : :class:`datetime.timedelta`
Returns
-------
pretty_formatted_datetime : str | 1.404077 | 1.455412 | 0.964729 |
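A small worked example of the divmod chain used above, assuming a positive delta:

```python
import datetime

td = datetime.timedelta(hours=1, minutes=2, seconds=3)
seconds = abs(int(td.total_seconds()))        # 3723
days, seconds = divmod(seconds, 86400)        # 0, 3723
hours, seconds = divmod(seconds, 3600)        # 1, 123
minutes, seconds = divmod(seconds, 60)        # 2, 3
print('{:02d}h{:02d}m:{:02d}s'.format(hours, minutes, seconds))  # 01h02m:03s
```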
dur = timedelta_formatter(duration)
percent = (stage_info.numCompletedTasks * bar_width) // stage_info.numTasks
bar = [' '] * bar_width
for i in range(bar_width):
char = ' '
if i < percent:
char = '='
if i == percent:
char = '>'
bar[i] = char
bar = ''.join(bar)
return "[Stage {info.stageId}:{bar} " \
"({info.numCompletedTasks} + {info.numActiveTasks} / {info.numTasks} Dur: {dur}]" \
.format(info=stage_info, dur=dur, bar=bar) | def _format_stage_info(bar_width, stage_info, duration, timedelta_formatter=_pretty_time_delta) | Formats the Spark stage progress.
Parameters
----------
bar_width : int
Width of the progressbar to print out.
stage_info : :class:`pyspark.status.StageInfo`
Information about the running stage
stage_id : int
Unique ID of the stage
duration : :class:`datetime.timedelta`
Duration of the stage so far
timedelta_formatter : callable
Converts a timedelta to a string.
Returns
-------
formatted : str | 3.279322 | 3.372422 | 0.972394 |
global _printer_singleton
if _printer_singleton is None:
_printer_singleton = ProgressPrinter(sc, timedelta_formatter, bar_width, sleep_time)
_printer_singleton.start()
return _printer_singleton | def start(sc, timedelta_formatter=_pretty_time_delta, bar_width=20, sleep_time=0.5) | Creates a :class:`ProgressPrinter` that polls the SparkContext for information
about active stage progress and prints that information to stderr.
The printer runs in a thread and is useful for showing text-based
progress bars in interactive environments (e.g., REPLs, Jupyter Notebooks).
This function creates a singleton printer instance and returns that instance
no matter what arguments are passed to this function again until :func:`stop`
is called to shutdown the singleton. If you want more control over the printer
lifecycle, create an instance of :class:`ProgressPrinter` directly and use its
methods.
Parameters
----------
sc: :class:`pyspark.context.SparkContext`, optional
SparkContext to use to create a new thread
timedelta_formatter : callable, optional
Converts a timedelta to a string.
bar_width : int, optional
Width of the progressbar to print out.
sleep_time : float, optional
Frequency in seconds with which to poll Apache Spark for task stage information.
Returns
-------
:class:`ProgressPrinter` | 2.954866 | 3.161077 | 0.934766 |
with self.condition:
self.paused = False
self.condition.notify_all() | def resume(self) | Resume progress updates. | 4.909375 | 4.254121 | 1.154028 |
last_status = ''
# lambda is used to avoid http://bugs.python.org/issue30473 in py36
start_times = defaultdict(lambda: datetime.datetime.now())
max_stage_id = -1
status = self.sc.statusTracker()
while True:
with self.condition:
if self.sc._jsc is None or not self.alive:
# End the thread
self.paused = True
break
elif self.paused:
# Pause the thread
self.condition.wait()
stage_ids = status.getActiveStageIds()
progressbar_list = []
# Only show first 3
stage_counter = 0
current_max_stage = max_stage_id
for stage_id in stage_ids:
stage_info = status.getStageInfo(stage_id)
if stage_info and stage_info.numTasks > 0:
# Set state variables used for flushing later
current_max_stage = stage_id
stage_counter += 1
td = datetime.datetime.now() - start_times[stage_id]
s = _format_stage_info(self.bar_width, stage_info, td, self.timedelta_formatter)
progressbar_list.append(s)
if stage_counter == 3:
break
# Ensure that when we get a new maximum stage id we print a \n
# to make the progress bar go on to the next line.
if current_max_stage > max_stage_id:
if last_status != '':
sys.stderr.write("\n")
sys.stderr.flush()
max_stage_id = current_max_stage
new_status = ' '.join(progressbar_list)
if new_status != last_status:
sys.stderr.write("\r" + new_status)
sys.stderr.flush()
last_status = new_status
time.sleep(self.sleep_time) | def run(self) | Run the progress printing loop. | 4.010051 | 3.930331 | 1.020283 |
env_dir = os.path.join(sandbox_dir, env_name)
cmdline = ["conda", "create", "--yes", "--copy", "--quiet", "-p", env_dir] + list(options) + dependencies
log.info("Creating conda environment: ")
log.info(" command line: %s", cmdline)
subprocess.check_call(cmdline, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
log.debug("Environment created")
return env_dir, env_name | def create_conda_env(sandbox_dir, env_name, dependencies, options=()) | Create a conda environment inside the current sandbox for the given list of dependencies and options.
Parameters
----------
sandbox_dir : str
env_name : str
dependencies : list
List of conda specs
options
List of additional options to pass to conda. Things like ["-c", "conda-forge"]
Returns
-------
(env_dir, env_name) | 2.773201 | 2.813555 | 0.985657 |
output_filename = env_dir + ".zip"
log.info("Archiving conda environment: %s -> %s", env_dir, output_filename)
subprocess.check_call(["zip", "-r", "-0", "-q", output_filename, env_dir])
return output_filename | def archive_dir(env_dir) | Compresses the directory and writes to its parent
Parameters
----------
env_dir : str
Returns
-------
str | 2.931716 | 3.455957 | 0.848308 |
from .launcher import SparkConfiguration
assert isinstance(spark_conf, SparkConfiguration)
yarn_python = os.path.join(".", "CONDA", env_name, "bin", "python")
archives = env_archive + "#CONDA"
new_spark_conf = copy.deepcopy(spark_conf)
new_spark_conf.master = "yarn"
new_spark_conf.deploy_mode = "client"
new_spark_conf.archives = [archives]
new_spark_conf.conf.set("spark.executorEnv.PYSPARK_PYTHON", yarn_python)
new_spark_conf._python_path = yarn_python
env_update = {
"PYSPARK_DRIVER_PYTHON": sys.executable,
"PYSPARK_PYTHON": yarn_python
}
os.environ.update(env_update)
return new_spark_conf | def prepare_pyspark_yarn_interactive(env_name, env_archive, spark_conf) | This ASSUMES that you have a compatible python environment running on the other side.
WARNING: Injects "PYSPARK_DRIVER_PYTHON" and "PYSPARK_PYTHON" as
environment variables into your current environment
Parameters
----------
env_name : str
env_archive : str
spark_conf : SparkConfiguration
Examples
--------
>>> from spylon.spark import SparkConfiguration
>>> conf = SparkConfiguration()
>>> import spylon.spark.yarn_launcher as yl
>>> conf = yl.prepare_pyspark_yarn_interactive(
... env_name="yarn-pyspark-env", env_archive="hdfs:///path/to/conda_envs/yarn-pyspark-env.zip",
... spark_conf=conf
... )
... # Create our context
... sc, sqlC = conf.sql_context("conda-test")
... # Example of it working
... rdd = sc.parallelize(range(10), 10)
...
... def pandas_test(x):
... import numpy
... import pandas
... import sys
... import socket
... return [{"numpy": numpy.__version__, "pandas": pandas.__version__,
... "host": socket.getfqdn(), "python": sys.executable}]
...
... rdd.mapPartitions(pandas_test).collect()
Returns
-------
SparkConfiguration
Copy of `spark_conf` input with added Yarn requirements. | 3.133428 | 2.977565 | 1.052346 |
env = dict(os.environ)
yarn_python = os.path.join(".", "CONDA", env_name, "bin", "python")
archives = env_archive + "#CONDA"
prepend_args = [
"--master", "yarn",
"--deploy-mode", "cluster",
"--conf", "spark.yarn.appMasterEnv.PYSPARK_PYTHON={}".format(yarn_python),
"--archives", archives,
]
env_update = {
"PYSPARK_PYTHON": yarn_python
}
env.update(env_update)
spark_submit = os.path.join(env["SPARK_HOME"], "bin", "spark-submit")
log.info("Running spark in YARN-client mode with added arguments")
log.info(" args: %s", pprint.pprint(prepend_args, indent=4))
log.info(" env: %s", pprint.pprint(env_update, indent=4))
# REPLACE our python process with another one
subprocess.check_call([spark_submit] + prepend_args + args, env=env) | def run_pyspark_yarn_cluster(env_dir, env_name, env_archive, args) | Initializes the required spark command line options in order to start a python job with the given python environment.
Parameters
----------
env_dir : str
env_name : str
env_archive : str
args : list
Returns
-------
This call will spawn a child process and block until that is complete. | 3.408504 | 3.609551 | 0.944301 |
spark_args = args.copy()
# Scan through the arguments to find --conda
# TODO: make this optional, if not specified ignore all the python stuff
# Is this double dash in front of conda env correct?
i = spark_args.index("--conda-env")
# pop off the '--conda-env' portion and just drop it on the floor
spark_args.pop(i)
# Now pop off the actual conda env var passed to the launcher
conda_env = spark_args.pop(i)
cleanup_functions = []
# What else could this possibly be other than a string here?
assert isinstance(conda_env, str)
func_kwargs = {'conda_env': conda_env,
'deploy_mode': deploy_mode,
'working_dir': working_dir,
'cleanup_functions': cleanup_functions}
if conda_env.startswith("hdfs:/"):
# "hadoop fs -ls" can return URLs with only a single "/" after the "hdfs:" scheme
env_name, env_dir, env_archive = _conda_from_hdfs(**func_kwargs)
elif conda_env.endswith(".zip"):
# We have a precreated conda environment around.
env_name, env_dir, conda_env = _conda_from_zip(**func_kwargs)
elif conda_env.endswith(".yaml"):
# The case where we have to CREATE the environment ourselves
env_name, env_dir, env_archive = _conda_from_yaml(**func_kwargs)
else:
raise NotImplementedError("Can only run launcher if your conda env is on hdfs (starts "
"with 'hdfs:/', is already a zip (ends with '.zip'), or is "
"coming from a yaml specification (ends with '.yaml' and "
"conforms to the conda environment.yaml spec)")
del func_kwargs
func_kwargs = dict(env_dir=env_dir, env_name=env_name, env_archive=env_archive, args=spark_args)
funcs = {'client': run_pyspark_yarn_client, 'cluster': run_pyspark_yarn_cluster}
try:
funcs[deploy_mode](**func_kwargs)
finally:
if not cleanup:
return
# iterate over and call all cleanup functions
for function in cleanup_functions:
try:
function()
except:
log.exception("Cleanup function %s failed", function) | def launcher(deploy_mode, args, working_dir=".", cleanup=True) | Initializes arguments and starts up pyspark with the correct deploy mode and environment.
Parameters
----------
deploy_mode : {"client", "cluster"}
args : list
Arguments to pass onwards to spark submit.
working_dir : str, optional
Path to working directory to use for creating conda environments. Defaults to the current working directory.
cleanup : bool, optional
Clean up extracted / generated files. This defaults to true since conda environments can be rather large.
Returns
-------
This call will spawn a child process and block until that is complete. | 5.221249 | 5.151306 | 1.013578 |
with zipfile.ZipFile(local_archive) as z:
z.extractall(working_dir)
archive_filenames = z.namelist()
root_elements = {m.split(posixpath.sep, 1)[0] for m in archive_filenames}
abs_archive_filenames = [os.path.abspath(os.path.join(working_dir, f)) for f in root_elements]
def cleanup():
for fn in abs_archive_filenames:
if os.path.isdir(fn):
shutil.rmtree(fn)
else:
os.unlink(fn)
cleanup_functions.append(cleanup)
env_dir = os.path.join(working_dir, env_name)
# Because of a python deficiency (Issue15795), the execute bits aren't
# preserved when the zip file is unzipped. Need to add them back here.
_fix_permissions(env_dir)
return env_dir | def _extract_local_archive(working_dir, cleanup_functions, env_name, local_archive) | Helper internal function for extracting a zipfile and ensure that a cleanup is queued.
Parameters
----------
working_dir : str
cleanup_functions : List[() -> NoneType]
env_name : str
local_archive : str | 3.347492 | 3.586323 | 0.933405 |
rv = factory()
for k, v in iteritems(d):
if predicate(k):
rv[k] = v
return rv | def keyfilter(predicate, d, factory=dict) | Filter items in dictionary by key
>>> iseven = lambda x: x % 2 == 0
>>> d = {1: 2, 2: 3, 3: 4, 4: 5}
>>> keyfilter(iseven, d)
{2: 3, 4: 5}
See Also:
valfilter
itemfilter
keymap | 2.453574 | 5.77387 | 0.424944 |
if conf is None:
conf = default_configuration
assert isinstance(conf, SparkConfiguration)
sc = conf.spark_context(application_name)
try:
yield sc
finally:
sc.stop() | def with_spark_context(application_name, conf=None) | Context manager for a spark context
Parameters
----------
application_name : string
conf : string, optional
Returns
-------
sc : SparkContext
Examples
--------
Used within a context manager
>>> with with_spark_context("MyApplication") as sc:
... # Your Code here
... pass | 3.47732 | 5.835046 | 0.595937 |
if conf is None:
conf = default_configuration
assert isinstance(conf, SparkConfiguration)
sc = conf.spark_context(application_name)
import pyspark.sql
try:
yield sc, pyspark.sql.SQLContext(sc)
finally:
sc.stop() | def with_sql_context(application_name, conf=None) | Context manager for a spark context
Returns
-------
sc : SparkContext
sql_context: SQLContext
Examples
--------
Used within a context manager
>>> with with_sql_context("MyApplication") as (sc, sql_context):
... import pyspark
... # Do stuff
... pass | 3.541167 | 4.661796 | 0.759614 |
if key not in self._conf_dict:
self.set(key, value)
return self | def set_if_unset(self, key, value) | Set a particular spark property by the string key name if it hasn't already been set.
This method allows chaining so that it can provide a similar feel to the standard Scala way of setting
multiple configurations
Parameters
----------
key : string
value : string
Returns
-------
self | 4.468961 | 7.53835 | 0.59283 |
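A minimal, self-contained sketch of the set-if-unset chaining pattern above (the `Conf` class and property names here are hypothetical stand-ins for the real configuration helper):

```python
class Conf:
    def __init__(self):
        self._conf_dict = {}

    def set(self, key, value):
        self._conf_dict[key] = value
        return self

    def set_if_unset(self, key, value):
        # Only set the key when it has not been configured already.
        if key not in self._conf_dict:
            self.set(key, value)
        return self

conf = (Conf()
        .set_if_unset("spark.executor.memory", "4g")
        .set_if_unset("spark.executor.memory", "8g"))
print(conf._conf_dict)   # {'spark.executor.memory': '4g'}
```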
from IPython.lib.pretty import RepresentationPrinter
assert isinstance(p, RepresentationPrinter)
p.begin_group(1, "SparkConfiguration(")
def kv(k, v, do_comma=True):
p.text(k)
p.pretty(v)
if do_comma:
p.text(", ")
p.breakable()
kv("launcher_arguments: ", self._spark_launcher_args)
kv("conf: ", self._spark_conf_helper)
kv("spark_home: ", self.spark_home)
kv("python_path: ", self._python_path, False)
p.end_group(1, ')') | def _repr_pretty_(self, p, cycle) | Pretty printer for the spark configuration | 4.11503 | 3.570519 | 1.152502 |
value = self._spark_launcher_args.get(driver_arg_key, self.conf._conf_dict.get(spark_property_key))
if value:
self._spark_launcher_args[driver_arg_key] = value
self.conf[spark_property_key] = value | def _set_launcher_property(self, driver_arg_key, spark_property_key) | Handler for a special property that exists in both the launcher arguments and the spark conf dictionary.
This will use the launcher argument if set falling back to the spark conf argument. If neither are set this is
a noop (which means that the standard spark defaults will be used).
Since `spark.driver.memory` (eg) can be set erroneously by a user on the standard spark conf, we want to be able
to use that value if present. If we do not have this fall-back behavior then these settings are IGNORED when
starting up the spark driver JVM under client mode (standalone, local, yarn-client or mesos-client).
Parameters
----------
driver_arg_key : string
Eg: "driver-memory"
spark_property_key : string
Eg: "spark.driver.memory" | 2.796297 | 3.088641 | 0.905349 |
cmd = []
# special case for driver JVM properties.
self._set_launcher_property("driver-memory", "spark.driver.memory")
self._set_launcher_property("driver-library-path", "spark.driver.extraLibraryPath")
self._set_launcher_property("driver-class-path", "spark.driver.extraClassPath")
self._set_launcher_property("driver-java-options", "spark.driver.extraJavaOptions")
self._set_launcher_property("executor-memory", "spark.executor.memory")
self._set_launcher_property("executor-cores", "spark.executor.cores")
for key, val in self._spark_launcher_args.items():
if val is None:
continue
val = list(as_iterable(val))
if len(val):
if key in self._boolean_args:
cmd.append("--{key}".format(key=key))
else:
sep = self._spark_launcher_arg_sep.get(key, ',')
cmd.append('--{key} {val}'.format(key=key, val=sep.join(str(x) for x in val)))
cmd += ['pyspark-shell']
cmd_line = ' '.join(x for x in cmd if x)
os.environ["PYSPARK_SUBMIT_ARGS"] = cmd_line
log.info("spark-submit arguments: %s", cmd_line) | def _set_environment_variables(self) | Initializes the correct environment variables for spark | 2.545769 | 2.431442 | 1.04702 |
global _SPARK_INITIALIZED
spark_home = self.spark_home
python_path = self._python_path
if use_findspark:
if _SPARK_INITIALIZED:
if spark_home == os.environ["SPARK_HOME"]:
# matches with already initialized
pass
else:
# findspark adds two path to the search path.
sys.path.pop(0)
sys.path.pop(0)
findspark.init(spark_home=spark_home, edit_rc=False, edit_profile=False, python_path=python_path)
else:
findspark.init(spark_home=spark_home, edit_rc=False, edit_profile=False, python_path=python_path)
_SPARK_INITIALIZED = True
self._set_environment_variables() | def _init_spark(self) | Initializes spark so that pyspark is importable. This also sets up the required environment variables | 3.917612 | 3.858589 | 1.015296 |
# initialize the spark configuration
self._init_spark()
import pyspark
import pyspark.sql
# initialize conf
spark_conf = pyspark.SparkConf()
for k, v in self._spark_conf_helper._conf_dict.items():
spark_conf.set(k, v)
log.info("Starting SparkContext")
return pyspark.SparkContext(appName=application_name, conf=spark_conf) | def spark_context(self, application_name) | Create a spark context given the parameters configured in this class.
The caller is responsible for calling ``.close`` on the resulting spark context
Parameters
----------
application_name : string
Returns
-------
sc : SparkContext | 3.731073 | 4.003668 | 0.931914 |
sc = self.spark_context(application_name)
import pyspark
sqlContext = pyspark.SQLContext(sc)
return (sc, sqlContext) | def sql_context(self, application_name) | Create a spark context given the parameters configured in this class.
The caller is responsible for calling ``.close`` on the resulting spark context
Parameters
----------
application_name : string
Returns
-------
sc : SparkContext | 4.180767 | 4.221929 | 0.990251 |
headers = {'Content-Type': 'application/json',
'User-Agent': 'WePay Python SDK'}
url = self.api_endpoint + uri
if self.access_token or token:
headers['Authorization'] = 'Bearer ' + \
(token if token else self.access_token)
if self.api_version:
headers['Api-Version'] = self.api_version
if risk_token:
headers['WePay-Risk-Token'] = risk_token
if client_ip:
headers['Client-IP'] = client_ip
if params:
params = json.dumps(params)
try:
response = self.requests.post(
url, data=params, headers=headers,
timeout=self.request_timeout)
return response.json()
except:
if 400 <= response.status_code <= 599:
raise Exception('Unknown error. Please contact support@wepay.com') | def call(self, uri, params=None, token=None, risk_token=None, client_ip=None) | Calls wepay.com/v2/``uri`` with ``params`` and returns the JSON
response as a python dict. The optional token parameter will override
the instance's access_token if it is set.
:param str uri: The URI on the API endpoint to call.
:param dict params: The parameters to pass to the URI.
:param str token: Optional override for this ``WePay`` object's access
token.
:param str risk_token: Optional WePay-Risk-Token for this API call.
:param str client_ip: Optional Client-IP for this API call. | 2.377758 | 2.298681 | 1.034401 |
if not options:
options = {}
if not scope:
scope = "manage_accounts,collect_payments," \
"view_user,preapprove_payments," \
"manage_subscriptions,send_money"
options['scope'] = scope
options['redirect_uri'] = redirect_uri
options['client_id'] = client_id
return self.browser_endpoint + '/oauth2/authorize?' + \
urllib.urlencode(options) | def get_authorization_url(self, redirect_uri, client_id, options=None,
scope=None) | Returns a URL to send the user to in order to get authorization.
After getting authorization the user will return to redirect_uri.
Optionally, scope can be set to limit permissions, and the options
dict can be loaded with any combination of state, user_name
or user_email.
:param str redirect_uri: The URI to redirect to after a authorization.
:param str client_id: The client ID issued by WePay to your app.
:keyword dict options: Allows for passing additional values to the
authorize call, aside from scope, redirect_uri, and etc.
:keyword str scope: A comma-separated string of permissions. | 4.058229 | 4.702104 | 0.863067 |
params = {
'redirect_uri': redirect_uri,
'client_id': client_id,
'client_secret': client_secret,
'code': code,
}
if callback_uri:
params.update({'callback_uri': callback_uri})
response = self.call('/oauth2/token', params)
# The call to /oauth2/token should return an access_token
# if the access_token was not returned, then an error occured
# we need to raise this error,
# otherwise this will die when trying to use the 'access_token' field
if 'access_token' not in response:
raise WePayError(response['error'], response['error_code'], response['error_description'])
self.access_token = response['access_token']
return response | def get_token(
self, redirect_uri, client_id, client_secret,
code, callback_uri=None) | Calls wepay.com/v2/oauth2/token to get an access token. Sets the
access_token for the WePay instance and returns the entire response
as a dict. Should only be called after the user returns from being
sent to get_authorization_url.
:param str redirect_uri: The same URI specified in the
:py:meth:`get_authorization_url` call that preceeded this.
:param str client_id: The client ID issued by WePay to your app.
:param str client_secret: The client secret issued by WePay
to your app.
:param str code: The code returned by :py:meth:`get_authorization_url`.
:param str callback_uri: The callback_uri you want to receive IPNs for
this user on. | 3.173223 | 3.309906 | 0.958705 |
Visualizer3D._scene = Scene(background_color=np.array(bgcolor))
Visualizer3D._scene.ambient_light = AmbientLight(color=[1.0, 1.0, 1.0], strength=1.0)
Visualizer3D._init_size = np.array(size) | def figure(bgcolor=(1,1,1), size=(1000,1000)) | Create a blank figure.
Parameters
----------
bgcolor : (3,) float
Color of the background with values in [0,1].
size : (2,) int
Width and height of the figure in pixels. | 3.665755 | 4.510639 | 0.812691 |
x = SceneViewer(Visualizer3D._scene,
size=Visualizer3D._init_size,
animate=animate,
animate_axis=axis,
save_directory=Visualizer3D._save_directory,
**kwargs)
if x.save_directory:
Visualizer3D._save_directory = x.save_directory
if clf:
Visualizer3D.clf() | def show(animate=False, axis=np.array([0.,0.,1.]), clf=True, **kwargs) | Display the current figure and enable interaction.
Parameters
----------
animate : bool
Whether or not to animate the scene.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after showing the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance. | 4.925633 | 5.456617 | 0.90269 |
v = SceneViewer(Visualizer3D._scene,
size=Visualizer3D._init_size,
animate=(n_frames > 1),
animate_axis=axis,
max_frames=n_frames,
**kwargs)
if clf:
Visualizer3D.clf()
return v.saved_frames | def render(n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs) | Render frames from the viewer.
Parameters
----------
n_frames : int
Number of frames to render. If more than one, the scene will animate.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance.
Returns
-------
list of perception.ColorImage
A list of ColorImages rendered from the viewer. | 8.10718 | 8.856536 | 0.915389 |
if n_frames >1 and os.path.splitext(filename)[1] != '.gif':
raise ValueError('Expected .gif file for multiple-frame save.')
v = SceneViewer(Visualizer3D._scene,
size=Visualizer3D._init_size,
animate=(n_frames > 1),
animate_axis=axis,
max_frames=n_frames,
**kwargs)
data = [m.data for m in v.saved_frames]
if len(data) > 1:
imageio.mimwrite(filename, data, fps=v._animate_rate, palettesize=128, subrectangles=True)
else:
imageio.imwrite(filename, data[0])
if clf:
Visualizer3D.clf() | def save(filename, n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs) | Save frames from the viewer out to a file.
Parameters
----------
filename : str
The filename in which to save the output image. If more than one frame,
should have extension .gif.
n_frames : int
Number of frames to render. If more than one, the scene will animate.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance. | 4.572041 | 4.243904 | 1.07732 |
n_frames = framerate * time
az = 2.0 * np.pi / n_frames
Visualizer3D.save(filename, n_frames=n_frames, axis=axis, clf=clf,
animate_rate=framerate, animate_az=az)
if clf:
Visualizer3D.clf() | def save_loop(filename, framerate=30, time=3.0, axis=np.array([0.,0.,1.]), clf=True, **kwargs) | Off-screen save a GIF of one rotation about the scene.
Parameters
----------
filename : str
The filename in which to save the output image (should have extension .gif)
framerate : int
The frame rate at which to animate motion.
time : float
The number of seconds for one rotation.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance. | 5.303604 | 5.940477 | 0.892791 |
Visualizer3D._scene = Scene(background_color=Visualizer3D._scene.background_color)
Visualizer3D._scene.ambient_light = AmbientLight(color=[1.0, 1.0, 1.0], strength=1.0) | def clf() | Clear the current figure | 4.22286 | 4.351744 | 0.970383 |
if isinstance(points, BagOfPoints):
if points.dim != 3:
raise ValueError('BagOfPoints must have dimension 3xN!')
else:
if type(points) is not np.ndarray:
raise ValueError('Points visualizer expects BagOfPoints or numpy array!')
if len(points.shape) == 1:
points = points[:,np.newaxis].T
if len(points.shape) != 2 or points.shape[1] != 3:
raise ValueError('Numpy array of points must have dimension (N,3)')
frame = 'points'
if T_points_world:
frame = T_points_world.from_frame
points = PointCloud(points.T, frame=frame)
color = np.array(color)
if subsample is not None:
num_points = points.num_points
points, inds = points.subsample(subsample, random=random)
if color.shape[0] == num_points and color.shape[1] == 3:
color = color[inds,:]
# transform into world frame
if points.frame != 'world':
if T_points_world is None:
T_points_world = RigidTransform(from_frame=points.frame, to_frame='world')
points_world = T_points_world * points
else:
points_world = points
point_data = points_world.data
if len(point_data.shape) == 1:
point_data = point_data[:,np.newaxis]
point_data = point_data.T
mpcolor = color
if len(color.shape) > 1:
mpcolor = color[0]
mp = MaterialProperties(
color = np.array(mpcolor),
k_a = 0.5,
k_d = 0.3,
k_s = 0.0,
alpha = 10.0,
smooth=True
)
# For each point, create a sphere of the specified color and size.
sphere = trimesh.creation.uv_sphere(scale, [n_cuts, n_cuts])
raw_pose_data = np.tile(np.eye(4), (points.num_points, 1))
raw_pose_data[3::4, :3] = point_data
instcolor = None
if color.ndim == 2 and color.shape[0] == points.num_points and color.shape[1] == 3:
instcolor = color
obj = InstancedSceneObject(sphere, raw_pose_data=raw_pose_data, colors=instcolor, material=mp)
if name is None:
name = str(uuid.uuid4())
Visualizer3D._scene.add_object(name, obj) | def points(points, T_points_world=None, color=np.array([0,1,0]), scale=0.01, n_cuts=20, subsample=None, random=False, name=None) | Scatter a point cloud in pose T_points_world.
Parameters
----------
points : autolab_core.BagOfPoints or (n,3) float
The point set to visualize.
T_points_world : autolab_core.RigidTransform
Pose of points, specified as a transformation from point frame to world frame.
color : (3,) or (n,3) float
Color of whole cloud or per-point colors
scale : float
Radius of each point.
n_cuts : int
Number of longitude/latitude lines on sphere points.
subsample : int
Parameter of subsampling to display fewer points.
name : str
A name for the object to be added. | 3.031721 | 2.856201 | 1.061452 |