Dataset schema:
  _id               string (length 2-7)
  title             string (length 1-88)
  partition         string (3 classes)
  text              string (length 75-19.8k)
  language          string (1 class)
  meta_information  dict
q15400
Snapserver.delete_client
train
def delete_client(self, identifier):
    """Delete client."""
    params = {'id': identifier}
    response = yield from self._transact(SERVER_DELETECLIENT, params)
    self.synchronize(response)
python
{ "resource": "" }
q15401
Snapserver.synchronize
train
def synchronize(self, status):
    """Synchronize snapserver."""
    self._version = status.get('server').get('version')
    self._groups = {}
    self._clients = {}
    self._streams = {}
    for stream in status.get('server').get('streams'):
        self._streams[stream.get('id')] = Snapstream(stream)
        _LOGGER.debug('stream found: %s', self._streams[stream.get('id')])
    for group in status.get('server').get('groups'):
        self._groups[group.get('id')] = Snapgroup(self, group)
        _LOGGER.debug('group found: %s', self._groups[group.get('id')])
        for client in group.get('clients'):
            self._clients[client.get('id')] = Snapclient(self, client)
            _LOGGER.debug('client found: %s', self._clients[client.get('id')])
python
{ "resource": "" }
q15402
Snapserver._request
train
def _request(self, method, identifier, key=None, value=None):
    """Perform request with identifier."""
    params = {'id': identifier}
    if key is not None and value is not None:
        params[key] = value
    result = yield from self._transact(method, params)
    return result.get(key)
python
{ "resource": "" }
q15403
Snapserver._on_server_disconnect
train
def _on_server_disconnect(self, exception):
    """Handle server disconnection."""
    self._protocol = None
    if self._on_disconnect_callback_func and callable(self._on_disconnect_callback_func):
        self._on_disconnect_callback_func(exception)
    if self._reconnect:
        self._reconnect_cb()
python
{ "resource": "" }
q15404
Snapserver._on_group_mute
train
def _on_group_mute(self, data):
    """Handle group mute."""
    self._groups.get(data.get('id')).update_mute(data)
python
{ "resource": "" }
q15405
Snapserver._on_group_stream_changed
train
def _on_group_stream_changed(self, data):
    """Handle group stream change."""
    self._groups.get(data.get('id')).update_stream(data)
python
{ "resource": "" }
q15406
Snapserver._on_client_connect
train
def _on_client_connect(self, data):
    """Handle client connect."""
    client = None
    if data.get('id') in self._clients:
        client = self._clients[data.get('id')]
        client.update_connected(True)
    else:
        client = Snapclient(self, data.get('client'))
        self._clients[data.get('id')] = client
        if self._new_client_callback_func and callable(self._new_client_callback_func):
            self._new_client_callback_func(client)
    _LOGGER.info('client %s connected', client.friendly_name)
python
{ "resource": "" }
q15407
Snapserver._on_client_disconnect
train
def _on_client_disconnect(self, data):
    """Handle client disconnect."""
    self._clients[data.get('id')].update_connected(False)
    _LOGGER.info('client %s disconnected', self._clients[data.get('id')].friendly_name)
python
{ "resource": "" }
q15408
Snapserver._on_client_volume_changed
train
def _on_client_volume_changed(self, data):
    """Handle client volume change."""
    self._clients.get(data.get('id')).update_volume(data)
python
{ "resource": "" }
q15409
Snapserver._on_client_name_changed
train
def _on_client_name_changed(self, data):
    """Handle client name change."""
    self._clients.get(data.get('id')).update_name(data)
python
{ "resource": "" }
q15410
Snapserver._on_client_latency_changed
train
def _on_client_latency_changed(self, data):
    """Handle client latency change."""
    self._clients.get(data.get('id')).update_latency(data)
python
{ "resource": "" }
q15411
Snapserver._on_stream_update
train
def _on_stream_update(self, data):
    """Handle stream update."""
    self._streams[data.get('id')].update(data.get('stream'))
    _LOGGER.info('stream %s updated', self._streams[data.get('id')].friendly_name)
    for group in self._groups.values():
        if group.stream == data.get('id'):
            group.callback()
python
{ "resource": "" }
q15412
mac
train
def mac():
    """ Get MAC. """
    from uuid import getnode as get_mac
    return ':'.join(("%012x" % get_mac())[i:i+2] for i in range(0, 12, 2))
python
{ "resource": "" }
q15413
Client.register
train
def register(self):
    """ Transact with server. """
    self._queue.put(hello_packet(socket.gethostname(), mac(), __version__))
    self._queue.put(request_packet(MSG_SERVER_SETTINGS))
    self._queue.put(request_packet(MSG_SAMPLE_FORMAT))
    self._queue.put(request_packet(MSG_HEADER))
python
{ "resource": "" }
q15414
Client.request_start
train
def request_start(self):
    """ Indicate readiness to receive stream. This is a blocking call. """
    self._queue.put(command_packet(CMD_START_STREAM))
    _LOGGER.info('Requesting stream')
    self._source.run()
python
{ "resource": "" }
q15415
Client._read_socket
train
def _read_socket(self):
    """ Process incoming messages from socket. """
    while True:
        base_bytes = self._socket.recv(BASE_SIZE)
        base = basemessage.parse(base_bytes)
        payload_bytes = self._socket.recv(base.payload_length)
        self._handle_message(packet.parse(base_bytes + payload_bytes))
python
{ "resource": "" }
q15416
Client._handle_message
train
def _handle_message(self, data):
    """ Handle messages. """
    if data.type == MSG_SERVER_SETTINGS:
        _LOGGER.info(data.payload)
    elif data.type == MSG_SAMPLE_FORMAT:
        _LOGGER.info(data.payload)
        self._connected = True
    elif data.type == MSG_TIME:
        if not self._buffered:
            _LOGGER.info('Buffering')
    elif data.type == MSG_HEADER:
        # Push to app source and start playing.
        _LOGGER.info(data.payload.codec.decode('ascii'))
        self._source.push(data.payload.header)
        self._source.play()
    elif data.type == MSG_WIRE_CHUNK:
        # Add chunks to play queue.
        self._buffer.put(data.payload.chunk)
        if self._buffer.qsize() > BUFFER_SIZE:
            self._buffered = True
        if self._buffer.empty():
            self._buffered = False
python
{ "resource": "" }
q15417
Client._write_socket
train
def _write_socket(self):
    """ Pass messages from queue to socket. """
    while True:
        now = time.time()
        if self._connected and (self._last_sync + SYNC_AFTER) < now:
            self._queue.put(request_packet(MSG_TIME))
            self._last_sync = now
        if not self._queue.empty():
            self._socket.send(self._queue.get())
python
{ "resource": "" }
q15418
Client._play
train
def _play(self):
    """ Relay buffer to app source. """
    while True:
        if self._buffered:
            self._source.push(self._buffer.get())
python
{ "resource": "" }
q15419
Snapgroup.set_stream
train
def set_stream(self, stream_id):
    """Set group stream."""
    self._group['stream_id'] = stream_id
    yield from self._server.group_stream(self.identifier, stream_id)
    _LOGGER.info('set stream to %s on %s', stream_id, self.friendly_name)
python
{ "resource": "" }
q15420
Snapgroup.set_muted
train
def set_muted(self, status):
    """Set group mute status."""
    self._group['muted'] = status
    yield from self._server.group_mute(self.identifier, status)
    _LOGGER.info('set muted to %s on %s', status, self.friendly_name)
python
{ "resource": "" }
q15421
Snapgroup.volume
train
def volume(self):
    """Get volume."""
    volume_sum = 0
    for client in self._group.get('clients'):
        volume_sum += self._server.client(client.get('id')).volume
    return int(volume_sum / len(self._group.get('clients')))
python
{ "resource": "" }
q15422
Snapgroup.add_client
train
def add_client(self, client_identifier):
    """Add a client."""
    if client_identifier in self.clients:
        _LOGGER.error('%s already in group %s', client_identifier, self.identifier)
        return
    new_clients = self.clients
    new_clients.append(client_identifier)
    yield from self._server.group_clients(self.identifier, new_clients)
    _LOGGER.info('added %s to %s', client_identifier, self.identifier)
    self._server.client(client_identifier).callback()
    self.callback()
python
{ "resource": "" }
q15423
Snapgroup.remove_client
train
def remove_client(self, client_identifier):
    """Remove a client."""
    new_clients = self.clients
    new_clients.remove(client_identifier)
    yield from self._server.group_clients(self.identifier, new_clients)
    _LOGGER.info('removed %s from %s', client_identifier, self.identifier)
    self._server.client(client_identifier).callback()
    self.callback()
python
{ "resource": "" }
q15424
Snapgroup.update_mute
train
def update_mute(self, data):
    """Update mute."""
    self._group['muted'] = data['mute']
    self.callback()
    _LOGGER.info('updated mute on %s', self.friendly_name)
python
{ "resource": "" }
q15425
Snapgroup.update_stream
train
def update_stream(self, data):
    """Update stream."""
    self._group['stream_id'] = data['stream_id']
    self.callback()
    _LOGGER.info('updated stream to %s on %s', self.stream, self.friendly_name)
python
{ "resource": "" }
q15426
Snapgroup.callback
train
def callback(self):
    """Run callback."""
    if self._callback_func and callable(self._callback_func):
        self._callback_func(self)
python
{ "resource": "" }
q15427
map_helper
train
def map_helper(data):
    """ Build a map message. """
    as_list = []
    length = 2
    for field, value in data.items():
        as_list.append(Container(field=bytes(field, ENCODING), value=bytes(value, ENCODING)))
        length += len(field) + len(value) + 4
    return (Container(
        num=len(as_list),
        map=as_list
    ), length)
python
{ "resource": "" }
q15428
command_packet
train
def command_packet(cmd):
    """ Build a command message. """
    return message('Command',
                   Container(string_length=len(cmd), string=bytes(cmd, ENCODING)),
                   len(cmd) + 2)
python
{ "resource": "" }
q15429
Snapclient.group
train
def group(self):
    """Get group."""
    for group in self._server.groups:
        if self.identifier in group.clients:
            return group
python
{ "resource": "" }
q15430
Snapclient.friendly_name
train
def friendly_name(self):
    """Get friendly name."""
    if len(self._client.get('config').get('name')):
        return self._client.get('config').get('name')
    return self._client.get('host').get('name')
python
{ "resource": "" }
q15431
Snapclient.set_name
train
def set_name(self, name):
    """Set a client name."""
    if not name:
        name = ''
    self._client['config']['name'] = name
    yield from self._server.client_name(self.identifier, name)
python
{ "resource": "" }
q15432
Snapclient.set_latency
train
def set_latency(self, latency):
    """Set client latency."""
    self._client['config']['latency'] = latency
    yield from self._server.client_latency(self.identifier, latency)
python
{ "resource": "" }
q15433
Snapclient.set_muted
train
def set_muted(self, status):
    """Set client mute status."""
    new_volume = self._client['config']['volume']
    new_volume['muted'] = status
    self._client['config']['volume']['muted'] = status
    yield from self._server.client_volume(self.identifier, new_volume)
    _LOGGER.info('set muted to %s on %s', status, self.friendly_name)
python
{ "resource": "" }
q15434
Snapclient.set_volume
train
def set_volume(self, percent, update_group=True):
    """Set client volume percent."""
    if percent not in range(0, 101):
        raise ValueError('Volume percent out of range')
    new_volume = self._client['config']['volume']
    new_volume['percent'] = percent
    self._client['config']['volume']['percent'] = percent
    yield from self._server.client_volume(self.identifier, new_volume)
    if update_group:
        self._server.group(self.group.identifier).callback()
    _LOGGER.info('set volume to %s on %s', percent, self.friendly_name)
python
{ "resource": "" }
q15435
Snapclient.update_volume
train
def update_volume(self, data):
    """Update volume."""
    self._client['config']['volume'] = data['volume']
    _LOGGER.info('updated volume on %s', self.friendly_name)
    self._server.group(self.group.identifier).callback()
    self.callback()
python
{ "resource": "" }
q15436
Snapclient.update_name
train
def update_name(self, data):
    """Update name."""
    self._client['config']['name'] = data['name']
    _LOGGER.info('updated name on %s', self.friendly_name)
    self.callback()
python
{ "resource": "" }
q15437
Snapclient.update_latency
train
def update_latency(self, data):
    """Update latency."""
    self._client['config']['latency'] = data['latency']
    _LOGGER.info('updated latency on %s', self.friendly_name)
    self.callback()
python
{ "resource": "" }
q15438
Snapclient.update_connected
train
def update_connected(self, status):
    """Update connected."""
    self._client['connected'] = status
    _LOGGER.info('updated connected status to %s on %s', status, self.friendly_name)
    self.callback()
python
{ "resource": "" }
q15439
GstreamerAppSrc.push
train
def push(self, buf):
    """ Push a buffer into the source. """
    self._src.emit('push-buffer', Gst.Buffer.new_wrapped(buf))
python
{ "resource": "" }
q15440
create_server
train
def create_server(loop, host, port=CONTROL_PORT, reconnect=False):
    """Server factory."""
    server = Snapserver(loop, host, port, reconnect)
    yield from server.start()
    return server
python
{ "resource": "" }
q15441
_get_ordering
train
def _get_ordering(son):
    """Helper function to extract formatted ordering from dict.
    """
    def fmt(field, direction):
        return '{0}{1}'.format({-1: '-', 1: '+'}[direction], field)

    if '$orderby' in son:
        return ', '.join(fmt(f, d) for f, d in son['$orderby'].items())
python
{ "resource": "" }
q15442
as_iterable
train
def as_iterable(iterable_or_scalar):
    """Utility for converting an object to an iterable.

    Parameters
    ----------
    iterable_or_scalar : anything

    Returns
    -------
    l : iterable
        If `obj` was None, return the empty tuple.
        If `obj` was not iterable returns a 1-tuple containing `obj`.
        Otherwise return `obj`

    Notes
    -----
    Although both string types and dictionaries are iterable in Python, we are
    treating them as not iterable in this method.  Thus, as_iterable(dict())
    returns (dict, ) and as_iterable(string) returns (string, )

    Examples
    --------
    >>> as_iterable(1)
    (1,)

    >>> as_iterable([1, 2, 3])
    [1, 2, 3]

    >>> as_iterable("my string")
    ("my string", )

    >>> as_iterable({'a': 1})
    ({'a': 1}, )
    """
    if iterable_or_scalar is None:
        return ()
    elif isinstance(iterable_or_scalar, string_types):
        return (iterable_or_scalar,)
    elif hasattr(iterable_or_scalar, "__iter__"):
        return iterable_or_scalar
    else:
        return (iterable_or_scalar,)
python
{ "resource": "" }
q15443
SparkJVMHelpers.classloader
train
def classloader(self):
    """Returns the private class loader that spark uses.

    This is needed since jars added with --jars are not easily resolvable by
    py4j's classloader
    """
    return self.jvm.org.apache.spark.util.Utils.getContextOrSparkClassLoader()
python
{ "resource": "" }
q15444
SparkJVMHelpers.get_java_container
train
def get_java_container(self, package_name=None, object_name=None, java_class_instance=None):
    """Convenience method to get the container that houses the method we wish
    to call.
    """
    if package_name is not None:
        jcontainer = self.import_scala_package_object(package_name)
    elif object_name is not None:
        jcontainer = self.import_scala_object(object_name)
    elif java_class_instance is not None:
        jcontainer = java_class_instance
    else:
        raise RuntimeError("Expected one of package_name, object_name or java_class_instance")
    return jcontainer
python
{ "resource": "" }
q15445
_save_documentation
train
def _save_documentation(version, base_url="https://spark.apache.org/docs"):
    """ Write the spark property documentation to a file """
    target_dir = join(dirname(__file__), 'spylon', 'spark')
    with open(join(target_dir, "spark_properties_{}.json".format(version)), 'w') as fp:
        all_props = _fetch_documentation(version=version, base_url=base_url)
        all_props = sorted(all_props, key=lambda x: x[0])
        all_props_d = [{"property": p, "default": d, "description": desc}
                       for p, d, desc in all_props]
        json.dump(all_props_d, fp, indent=2)
python
{ "resource": "" }
q15446
_pretty_time_delta
train
def _pretty_time_delta(td):
    """Creates a string representation of a time delta.

    Parameters
    ----------
    td : :class:`datetime.timedelta`

    Returns
    -------
    pretty_formatted_datetime : str
    """
    seconds = td.total_seconds()
    sign_string = '-' if seconds < 0 else ''
    seconds = abs(int(seconds))
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    d = dict(sign=sign_string, days=days, hours=hours, minutes=minutes, seconds=seconds)
    if days > 0:
        return '{sign}{days}d{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d)
    elif hours > 0:
        return '{sign}{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d)
    elif minutes > 0:
        return '{sign}{minutes:02d}m:{seconds:02d}s'.format(**d)
    else:
        return '{sign}{seconds:02d}s'.format(**d)
python
{ "resource": "" }
q15447
_format_stage_info
train
def _format_stage_info(bar_width, stage_info, duration, timedelta_formatter=_pretty_time_delta):
    """Formats the Spark stage progress.

    Parameters
    ----------
    bar_width : int
        Width of the progressbar to print out.
    stage_info : :class:`pyspark.status.StageInfo`
        Information about the running stage
    stage_id : int
        Unique ID of the stage
    duration : :class:`datetime.timedelta`
        Duration of the stage so far
    timedelta_formatter : callable
        Converts a timedelta to a string.

    Returns
    -------
    formatted : str
    """
    dur = timedelta_formatter(duration)
    percent = (stage_info.numCompletedTasks * bar_width) // stage_info.numTasks
    bar = [' '] * bar_width
    for i in range(bar_width):
        char = ' '
        if i < percent:
            char = '='
        if i == percent:
            char = '>'
        bar[i] = char
    bar = ''.join(bar)
    return "[Stage {info.stageId}:{bar} " \
           "({info.numCompletedTasks} + {info.numActiveTasks} / {info.numTasks} Dur: {dur}]" \
        .format(info=stage_info, dur=dur, bar=bar)
python
{ "resource": "" }
q15448
ProgressPrinter.resume
train
def resume(self):
    """Resume progress updates."""
    with self.condition:
        self.paused = False
        self.condition.notify_all()
python
{ "resource": "" }
q15449
ProgressPrinter.run
train
def run(self):
    """Run the progress printing loop."""
    last_status = ''
    # lambda is used to avoid http://bugs.python.org/issue30473 in py36
    start_times = defaultdict(lambda: datetime.datetime.now())
    max_stage_id = -1
    status = self.sc.statusTracker()
    while True:
        with self.condition:
            if self.sc._jsc is None or not self.alive:
                # End the thread
                self.paused = True
                break
            elif self.paused:
                # Pause the thread
                self.condition.wait()
        stage_ids = status.getActiveStageIds()
        progressbar_list = []
        # Only show first 3
        stage_counter = 0
        current_max_stage = max_stage_id
        for stage_id in stage_ids:
            stage_info = status.getStageInfo(stage_id)
            if stage_info and stage_info.numTasks > 0:
                # Set state variables used for flushing later
                current_max_stage = stage_id
                stage_counter += 1
                td = datetime.datetime.now() - start_times[stage_id]
                s = _format_stage_info(self.bar_width, stage_info, td, self.timedelta_formatter)
                progressbar_list.append(s)
                if stage_counter == 3:
                    break
        # Ensure that when we get a new maximum stage id we print a \n
        # to make the progress bar go on to the next line.
        if current_max_stage > max_stage_id:
            if last_status != '':
                sys.stderr.write("\n")
                sys.stderr.flush()
            max_stage_id = current_max_stage
        new_status = ' '.join(progressbar_list)
        if new_status != last_status:
            sys.stderr.write("\r" + new_status)
            sys.stderr.flush()
            last_status = new_status
        time.sleep(self.sleep_time)
python
{ "resource": "" }
q15450
create_conda_env
train
def create_conda_env(sandbox_dir, env_name, dependencies, options=()):
    """
    Create a conda environment inside the current sandbox for the given list of
    dependencies and options.

    Parameters
    ----------
    sandbox_dir : str
    env_name : str
    dependencies : list
        List of conda specs
    options
        List of additional options to pass to conda.  Things like ["-c", "conda-forge"]

    Returns
    -------
    (env_dir, env_name)
    """
    env_dir = os.path.join(sandbox_dir, env_name)
    cmdline = ["conda", "create", "--yes", "--copy", "--quiet", "-p", env_dir] + list(options) + dependencies

    log.info("Creating conda environment: ")
    log.info("  command line: %s", cmdline)
    subprocess.check_call(cmdline, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    log.debug("Environment created")

    return env_dir, env_name
python
{ "resource": "" }
q15451
archive_dir
train
def archive_dir(env_dir):
    """
    Compresses the directory and writes to its parent

    Parameters
    ----------
    env_dir : str

    Returns
    -------
    str
    """
    output_filename = env_dir + ".zip"
    log.info("Archiving conda environment: %s -> %s", env_dir, output_filename)
    subprocess.check_call(["zip", "-r", "-0", "-q", output_filename, env_dir])
    return output_filename
python
{ "resource": "" }
q15452
prepare_pyspark_yarn_interactive
train
def prepare_pyspark_yarn_interactive(env_name, env_archive, spark_conf):
    """
    This ASSUMES that you have a compatible python environment running on the other side.

    WARNING: Injects "PYSPARK_DRIVER_PYTHON" and "PYSPARK_PYTHON" as environmental
    variables into your current environment

    Parameters
    ----------
    env_name : str
    env_archive : str
    spark_conf : SparkConfiguration

    Examples
    --------
    >>> from spylon.spark import SparkConfiguration
    >>> conf = SparkConfiguration()
    >>> import spylon.spark.yarn_launcher as yl
    >>> conf = yl.prepare_pyspark_yarn_interactive(
    ...     env_name="yarn-pyspark-env",
    ...     env_archive="hdfs:///path/to/conda_envs/yarn-pyspark-env.zip",
    ...     spark_conf=conf
    ... )
    ... # Create our context
    ... sc, sqlC = conf.sql_context("conda-test")
    ... # Example of it working
    ... rdd = sc.parallelize(range(10), 10)
    ...
    ... def pandas_test(x):
    ...     import numpy
    ...     import pandas
    ...     import sys
    ...     import socket
    ...     return [{"numpy": numpy.__version__, "pandas": pandas.__version__,
    ...              "host": socket.getfqdn(), "python": sys.executable}]
    ...
    ... rdd.mapPartitions(pandas_test).collect()

    Returns
    -------
    SparkConfiguration
        Copy of `spark_conf` input with added Yarn requirements.
    """
    from .launcher import SparkConfiguration
    assert isinstance(spark_conf, SparkConfiguration)
    yarn_python = os.path.join(".", "CONDA", env_name, "bin", "python")
    archives = env_archive + "#CONDA"

    new_spark_conf = copy.deepcopy(spark_conf)
    new_spark_conf.master = "yarn"
    new_spark_conf.deploy_mode = "client"
    new_spark_conf.archives = [archives]
    new_spark_conf.conf.set("spark.executorEnv.PYSPARK_PYTHON", yarn_python)
    new_spark_conf._python_path = yarn_python

    env_update = {
        "PYSPARK_DRIVER_PYTHON": sys.executable,
        "PYSPARK_PYTHON": yarn_python
    }
    os.environ.update(env_update)
    return new_spark_conf
python
{ "resource": "" }
q15453
run_pyspark_yarn_cluster
train
def run_pyspark_yarn_cluster(env_dir, env_name, env_archive, args):
    """
    Initializes the required spark command line options in order to start a
    python job with the given python environment.

    Parameters
    ----------
    env_dir : str
    env_name : str
    env_archive : str
    args : list

    Returns
    -------
    This call will spawn a child process and block until that is complete.
    """
    env = dict(os.environ)
    yarn_python = os.path.join(".", "CONDA", env_name, "bin", "python")
    archives = env_archive + "#CONDA"
    prepend_args = [
        "--master", "yarn",
        "--deploy-mode", "cluster",
        "--conf", "spark.yarn.appMasterEnv.PYSPARK_PYTHON={}".format(yarn_python),
        "--archives", archives,
    ]
    env_update = {
        "PYSPARK_PYTHON": yarn_python
    }
    env.update(env_update)
    spark_submit = os.path.join(env["SPARK_HOME"], "bin", "spark-submit")
    log.info("Running spark in YARN-cluster mode with added arguments")
    log.info("   args: %s", pprint.pprint(prepend_args, indent=4))
    log.info("   env: %s", pprint.pprint(env_update, indent=4))
    # REPLACE our python process with another one
    subprocess.check_call([spark_submit] + prepend_args + args, env=env)
python
{ "resource": "" }
q15454
launcher
train
def launcher(deploy_mode, args, working_dir=".", cleanup=True):
    """Initializes arguments and starts up pyspark with the correct deploy mode and environment.

    Parameters
    ----------
    deploy_mode : {"client", "cluster"}
    args : list
        Arguments to pass onwards to spark submit.
    working_dir : str, optional
        Path to working directory to use for creating conda environments.
        Defaults to the current working directory.
    cleanup : bool, optional
        Clean up extracted / generated files.  This defaults to true since conda
        environments can be rather large.

    Returns
    -------
    This call will spawn a child process and block until that is complete.
    """
    spark_args = args.copy()
    # Scan through the arguments to find --conda
    # TODO: make this optional, if not specified ignore all the python stuff
    # Is this double dash in front of conda env correct?
    i = spark_args.index("--conda-env")
    # pop off the '--conda-env' portion and just drop it on the floor
    spark_args.pop(i)
    # Now pop off the actual conda env var passed to the launcher
    conda_env = spark_args.pop(i)

    cleanup_functions = []
    # What else could this possibly be other than a string here?
    assert isinstance(conda_env, str)
    func_kwargs = {'conda_env': conda_env,
                   'deploy_mode': deploy_mode,
                   'working_dir': working_dir,
                   'cleanup_functions': cleanup_functions}
    if conda_env.startswith("hdfs:/"):
        # "hadoop fs -ls" can return URLs with only a single "/" after the "hdfs:" scheme
        env_name, env_dir, env_archive = _conda_from_hdfs(**func_kwargs)
    elif conda_env.endswith(".zip"):
        # We have a precreated conda environment around.
        env_name, env_dir, conda_env = _conda_from_zip(**func_kwargs)
    elif conda_env.endswith(".yaml"):
        # The case where we have to CREATE the environment ourselves
        env_name, env_dir, env_archive = _conda_from_yaml(**func_kwargs)
    else:
        raise NotImplementedError("Can only run launcher if your conda env is on hdfs (starts "
                                  "with 'hdfs:/', is already a zip (ends with '.zip'), or is "
                                  "coming from a yaml specification (ends with '.yaml' and "
                                  "conforms to the conda environment.yaml spec)")
    del func_kwargs

    func_kwargs = dict(env_dir=env_dir, env_name=env_name, env_archive=env_archive, args=spark_args)
    funcs = {'client': run_pyspark_yarn_client,
             'cluster': run_pyspark_yarn_cluster}
    try:
        funcs[deploy_mode](**func_kwargs)
    finally:
        if not cleanup:
            return
        # iterate over and call all cleanup functions
        for function in cleanup_functions:
            try:
                function()
            except:
                log.exception("Cleanup function %s failed", function)
python
{ "resource": "" }
q15455
_extract_local_archive
train
def _extract_local_archive(working_dir, cleanup_functions, env_name, local_archive):
    """Helper internal function for extracting a zipfile and ensuring that a cleanup is queued.

    Parameters
    ----------
    working_dir : str
    cleanup_functions : List[() -> NoneType]
    env_name : str
    local_archive : str
    """
    with zipfile.ZipFile(local_archive) as z:
        z.extractall(working_dir)
        archive_filenames = z.namelist()

    root_elements = {m.split(posixpath.sep, 1)[0] for m in archive_filenames}
    abs_archive_filenames = [os.path.abspath(os.path.join(working_dir, f)) for f in root_elements]

    def cleanup():
        for fn in abs_archive_filenames:
            if os.path.isdir(fn):
                shutil.rmtree(fn)
            else:
                os.unlink(fn)

    cleanup_functions.append(cleanup)
    env_dir = os.path.join(working_dir, env_name)
    # Because of a python deficiency (Issue15795), the execute bits aren't
    # preserved when the zip file is unzipped.  Need to add them back here.
    _fix_permissions(env_dir)
    return env_dir
python
{ "resource": "" }
q15456
keyfilter
train
def keyfilter(predicate, d, factory=dict):
    """ Filter items in dictionary by key

    >>> iseven = lambda x: x % 2 == 0
    >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
    >>> keyfilter(iseven, d)
    {2: 3, 4: 5}

    See Also:
        valfilter
        itemfilter
        keymap
    """
    rv = factory()
    for k, v in iteritems(d):
        if predicate(k):
            rv[k] = v
    return rv
python
{ "resource": "" }
q15457
_SparkConfHelper.set_if_unset
train
def set_if_unset(self, key, value):
    """Set a particular spark property by the string key name if it hasn't already been set.

    This method allows chaining so that it can provide a similar feel to the
    standard Scala way of setting multiple configurations

    Parameters
    ----------
    key : string
    value : string

    Returns
    -------
    self
    """
    if key not in self._conf_dict:
        self.set(key, value)
    return self
python
{ "resource": "" }
q15458
SparkConfiguration._repr_pretty_
train
def _repr_pretty_(self, p, cycle):
    """Pretty printer for the spark configuration"""
    from IPython.lib.pretty import RepresentationPrinter
    assert isinstance(p, RepresentationPrinter)
    p.begin_group(1, "SparkConfiguration(")

    def kv(k, v, do_comma=True):
        p.text(k)
        p.pretty(v)
        if do_comma:
            p.text(", ")
        p.breakable()

    kv("launcher_arguments: ", self._spark_launcher_args)
    kv("conf: ", self._spark_conf_helper)
    kv("spark_home: ", self.spark_home)
    kv("python_path: ", self._python_path, False)
    p.end_group(1, ')')
python
{ "resource": "" }
q15459
SparkConfiguration._set_launcher_property
train
def _set_launcher_property(self, driver_arg_key, spark_property_key):
    """Handler for a special property that exists in both the launcher arguments
    and the spark conf dictionary.

    This will use the launcher argument if set, falling back to the spark conf
    argument.  If neither is set this is a noop (which means that the standard
    spark defaults will be used).

    Since `spark.driver.memory` (eg) can be set erroneously by a user on the
    standard spark conf, we want to be able to use that value if present.  If we
    do not have this fall-back behavior then these settings are IGNORED when
    starting up the spark driver JVM under client mode (standalone, local,
    yarn-client or mesos-client).

    Parameters
    ----------
    driver_arg_key : string
        Eg: "driver-memory"
    spark_property_key : string
        Eg: "spark.driver.memory"
    """
    value = self._spark_launcher_args.get(driver_arg_key,
                                          self.conf._conf_dict.get(spark_property_key))
    if value:
        self._spark_launcher_args[driver_arg_key] = value
        self.conf[spark_property_key] = value
python
{ "resource": "" }
q15460
SparkConfiguration._set_environment_variables
train
def _set_environment_variables(self):
    """Initializes the correct environment variables for spark"""
    cmd = []
    # special case for driver JVM properties.
    self._set_launcher_property("driver-memory", "spark.driver.memory")
    self._set_launcher_property("driver-library-path", "spark.driver.extraLibraryPath")
    self._set_launcher_property("driver-class-path", "spark.driver.extraClassPath")
    self._set_launcher_property("driver-java-options", "spark.driver.extraJavaOptions")
    self._set_launcher_property("executor-memory", "spark.executor.memory")
    self._set_launcher_property("executor-cores", "spark.executor.cores")

    for key, val in self._spark_launcher_args.items():
        if val is None:
            continue
        val = list(as_iterable(val))
        if len(val):
            if key in self._boolean_args:
                cmd.append("--{key}".format(key=key))
            else:
                sep = self._spark_launcher_arg_sep.get(key, ',')
                cmd.append('--{key} {val}'.format(key=key, val=sep.join(str(x) for x in val)))

    cmd += ['pyspark-shell']
    cmd_line = ' '.join(x for x in cmd if x)
    os.environ["PYSPARK_SUBMIT_ARGS"] = cmd_line
    log.info("spark-submit arguments: %s", cmd_line)
python
{ "resource": "" }
q15461
SparkConfiguration._init_spark
train
def _init_spark(self):
    """Initializes spark so that pyspark is importable.  This also sets up the
    required environment variables
    """
    global _SPARK_INITIALIZED
    spark_home = self.spark_home
    python_path = self._python_path
    if use_findspark:
        if _SPARK_INITIALIZED:
            if spark_home == os.environ["SPARK_HOME"]:
                # matches with already initialized
                pass
            else:
                # findspark adds two paths to the search path.
                sys.path.pop(0)
                sys.path.pop(0)
                findspark.init(spark_home=spark_home, edit_rc=False, edit_profile=False,
                               python_path=python_path)
        else:
            findspark.init(spark_home=spark_home, edit_rc=False, edit_profile=False,
                           python_path=python_path)
        _SPARK_INITIALIZED = True
    self._set_environment_variables()
python
{ "resource": "" }
q15462
WePay.get_authorization_url
train
def get_authorization_url(self, redirect_uri, client_id, options=None, scope=None):
    """
    Returns a URL to send the user to in order to get authorization.
    After getting authorization the user will return to redirect_uri.
    Optionally, scope can be set to limit permissions, and the options
    dict can be loaded with any combination of state, user_name or
    user_email.

    :param str redirect_uri: The URI to redirect to after authorization.
    :param str client_id: The client ID issued by WePay to your app.
    :keyword dict options: Allows for passing additional values to the
        authorize call, aside from scope, redirect_uri, etc.
    :keyword str scope: A comma-separated string of permissions.
    """
    if not options:
        options = {}
    if not scope:
        scope = "manage_accounts,collect_payments," \
                "view_user,preapprove_payments," \
                "manage_subscriptions,send_money"
    options['scope'] = scope
    options['redirect_uri'] = redirect_uri
    options['client_id'] = client_id
    return self.browser_endpoint + '/oauth2/authorize?' + \
        urllib.urlencode(options)
python
{ "resource": "" }
q15463
Visualizer3D.figure
train
def figure(bgcolor=(1,1,1), size=(1000,1000)):
    """Create a blank figure.

    Parameters
    ----------
    bgcolor : (3,) float
        Color of the background with values in [0,1].
    size : (2,) int
        Width and height of the figure in pixels.
    """
    Visualizer3D._scene = Scene(background_color=np.array(bgcolor))
    Visualizer3D._scene.ambient_light = AmbientLight(color=[1.0, 1.0, 1.0], strength=1.0)
    Visualizer3D._init_size = np.array(size)
python
{ "resource": "" }
q15464
Visualizer3D.show
train
def show(animate=False, axis=np.array([0.,0.,1.]), clf=True, **kwargs):
    """Display the current figure and enable interaction.

    Parameters
    ----------
    animate : bool
        Whether or not to animate the scene.
    axis : (3,) float or None
        If present, the animation will rotate about the given axis in world coordinates.
        Otherwise, the animation will rotate in azimuth.
    clf : bool
        If true, the Visualizer is cleared after showing the figure.
    kwargs : dict
        Other keyword arguments for the SceneViewer instance.
    """
    x = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size,
                    animate=animate, animate_axis=axis,
                    save_directory=Visualizer3D._save_directory, **kwargs)
    if x.save_directory:
        Visualizer3D._save_directory = x.save_directory
    if clf:
        Visualizer3D.clf()
python
{ "resource": "" }
q15465
Visualizer3D.render
train
def render(n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs):
    """Render frames from the viewer.

    Parameters
    ----------
    n_frames : int
        Number of frames to render. If more than one, the scene will animate.
    axis : (3,) float or None
        If present, the animation will rotate about the given axis in world coordinates.
        Otherwise, the animation will rotate in azimuth.
    clf : bool
        If true, the Visualizer is cleared after rendering the figure.
    kwargs : dict
        Other keyword arguments for the SceneViewer instance.

    Returns
    -------
    list of perception.ColorImage
        A list of ColorImages rendered from the viewer.
    """
    v = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size,
                    animate=(n_frames > 1), animate_axis=axis,
                    max_frames=n_frames, **kwargs)
    if clf:
        Visualizer3D.clf()
    return v.saved_frames
python
{ "resource": "" }
q15466
Visualizer3D.save
train
def save(filename, n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs):
    """Save frames from the viewer out to a file.

    Parameters
    ----------
    filename : str
        The filename in which to save the output image. If more than one frame,
        should have extension .gif.
    n_frames : int
        Number of frames to render. If more than one, the scene will animate.
    axis : (3,) float or None
        If present, the animation will rotate about the given axis in world coordinates.
        Otherwise, the animation will rotate in azimuth.
    clf : bool
        If true, the Visualizer is cleared after rendering the figure.
    kwargs : dict
        Other keyword arguments for the SceneViewer instance.
    """
    if n_frames > 1 and os.path.splitext(filename)[1] != '.gif':
        raise ValueError('Expected .gif file for multiple-frame save.')
    v = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size,
                    animate=(n_frames > 1), animate_axis=axis,
                    max_frames=n_frames, **kwargs)
    data = [m.data for m in v.saved_frames]
    if len(data) > 1:
        imageio.mimwrite(filename, data, fps=v._animate_rate,
                         palettesize=128, subrectangles=True)
    else:
        imageio.imwrite(filename, data[0])
    if clf:
        Visualizer3D.clf()
python
{ "resource": "" }
q15467
Visualizer3D.save_loop
train
def save_loop(filename, framerate=30, time=3.0, axis=np.array([0.,0.,1.]), clf=True, **kwargs):
    """Off-screen save a GIF of one rotation about the scene.

    Parameters
    ----------
    filename : str
        The filename in which to save the output image (should have extension .gif)
    framerate : int
        The frame rate at which to animate motion.
    time : float
        The number of seconds for one rotation.
    axis : (3,) float or None
        If present, the animation will rotate about the given axis in world coordinates.
        Otherwise, the animation will rotate in azimuth.
    clf : bool
        If true, the Visualizer is cleared after rendering the figure.
    kwargs : dict
        Other keyword arguments for the SceneViewer instance.
    """
    n_frames = framerate * time
    az = 2.0 * np.pi / n_frames
    Visualizer3D.save(filename, n_frames=n_frames, axis=axis, clf=clf,
                      animate_rate=framerate, animate_az=az)
    if clf:
        Visualizer3D.clf()
python
{ "resource": "" }
q15468
Visualizer3D.clf
train
def clf():
    """Clear the current figure
    """
    Visualizer3D._scene = Scene(background_color=Visualizer3D._scene.background_color)
    Visualizer3D._scene.ambient_light = AmbientLight(color=[1.0, 1.0, 1.0], strength=1.0)
python
{ "resource": "" }
q15469
Visualizer3D.points
train
def points(points, T_points_world=None, color=np.array([0,1,0]), scale=0.01,
           n_cuts=20, subsample=None, random=False, name=None):
    """Scatter a point cloud in pose T_points_world.

    Parameters
    ----------
    points : autolab_core.BagOfPoints or (n,3) float
        The point set to visualize.
    T_points_world : autolab_core.RigidTransform
        Pose of points, specified as a transformation from point frame to world frame.
    color : (3,) or (n,3) float
        Color of whole cloud or per-point colors
    scale : float
        Radius of each point.
    n_cuts : int
        Number of longitude/latitude lines on sphere points.
    subsample : int
        Parameter of subsampling to display fewer points.
    name : str
        A name for the object to be added.
    """
    if isinstance(points, BagOfPoints):
        if points.dim != 3:
            raise ValueError('BagOfPoints must have dimension 3xN!')
    else:
        if type(points) is not np.ndarray:
            raise ValueError('Points visualizer expects BagOfPoints or numpy array!')
        if len(points.shape) == 1:
            points = points[:,np.newaxis].T
        if len(points.shape) != 2 or points.shape[1] != 3:
            raise ValueError('Numpy array of points must have dimension (N,3)')
        frame = 'points'
        if T_points_world:
            frame = T_points_world.from_frame
        points = PointCloud(points.T, frame=frame)

    color = np.array(color)
    if subsample is not None:
        num_points = points.num_points
        points, inds = points.subsample(subsample, random=random)
        if color.shape[0] == num_points and color.shape[1] == 3:
            color = color[inds,:]

    # transform into world frame
    if points.frame != 'world':
        if T_points_world is None:
            T_points_world = RigidTransform(from_frame=points.frame, to_frame='world')
        points_world = T_points_world * points
    else:
        points_world = points

    point_data = points_world.data
    if len(point_data.shape) == 1:
        point_data = point_data[:,np.newaxis]
    point_data = point_data.T

    mpcolor = color
    if len(color.shape) > 1:
        mpcolor = color[0]
    mp = MaterialProperties(
        color = np.array(mpcolor),
        k_a = 0.5,
        k_d = 0.3,
        k_s = 0.0,
        alpha = 10.0,
        smooth=True
    )

    # For each point, create a sphere of the specified color and size.
    sphere = trimesh.creation.uv_sphere(scale, [n_cuts, n_cuts])
    raw_pose_data = np.tile(np.eye(4), (points.num_points, 1))
    raw_pose_data[3::4, :3] = point_data
    instcolor = None
    if color.ndim == 2 and color.shape[0] == points.num_points and color.shape[1] == 3:
        instcolor = color
    obj = InstancedSceneObject(sphere, raw_pose_data=raw_pose_data, colors=instcolor, material=mp)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, obj)
python
{ "resource": "" }
q15470
Visualizer3D.mesh
train
def mesh(mesh, T_mesh_world=RigidTransform(from_frame='obj', to_frame='world'),
         style='surface', smooth=False, color=(0.5,0.5,0.5), name=None):
    """Visualize a 3D triangular mesh.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        The mesh to visualize.
    T_mesh_world : autolab_core.RigidTransform
        The pose of the mesh, specified as a transformation from mesh frame to world frame.
    style : str
        Triangular mesh style, either 'surface' or 'wireframe'.
    smooth : bool
        If true, the mesh is smoothed before rendering.
    color : 3-tuple
        Color tuple.
    name : str
        A name for the object to be added.
    """
    if not isinstance(mesh, trimesh.Trimesh):
        raise ValueError('Must provide a trimesh.Trimesh object')

    mp = MaterialProperties(
        color = np.array(color),
        k_a = 0.5,
        k_d = 0.3,
        k_s = 0.1,
        alpha = 10.0,
        smooth=smooth,
        wireframe=(style == 'wireframe')
    )

    obj = SceneObject(mesh, T_mesh_world, mp)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, obj)
python
{ "resource": "" }
q15471
Visualizer3D.mesh_stable_pose
train
def mesh_stable_pose(mesh, T_obj_table,
                     T_table_world=RigidTransform(from_frame='table', to_frame='world'),
                     style='wireframe', smooth=False, color=(0.5,0.5,0.5),
                     dim=0.15, plot_table=True, plot_com=False, name=None):
    """Visualize a mesh in a stable pose.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        The mesh to visualize.
    T_obj_table : autolab_core.RigidTransform
        Pose of object relative to table.
    T_table_world : autolab_core.RigidTransform
        Pose of table relative to world.
    style : str
        Triangular mesh style, either 'surface' or 'wireframe'.
    smooth : bool
        If true, the mesh is smoothed before rendering.
    color : 3-tuple
        Color tuple.
    dim : float
        The side-length for the table.
    plot_table : bool
        If true, a table is visualized as well.
    plot_com : bool
        If true, a ball is visualized at the object's center of mass.
    name : str
        A name for the object to be added.

    Returns
    -------
    autolab_core.RigidTransform
        The pose of the mesh in world frame.
    """
    T_obj_table = T_obj_table.as_frames('obj', 'table')
    T_obj_world = T_table_world * T_obj_table

    Visualizer3D.mesh(mesh, T_obj_world, style=style, smooth=smooth, color=color, name=name)
    if plot_table:
        Visualizer3D.table(T_table_world, dim=dim)
    if plot_com:
        Visualizer3D.points(Point(np.array(mesh.center_mass), 'obj'), T_obj_world, scale=0.01)
    return T_obj_world
python
{ "resource": "" }
q15472
Visualizer3D.table
train
def table(T_table_world=RigidTransform(from_frame='table', to_frame='world'),
          dim=0.16, color=(0,0,0)):
    """Plot a table mesh in 3D.

    Parameters
    ----------
    T_table_world : autolab_core.RigidTransform
        Pose of table relative to world.
    dim : float
        The side-length for the table.
    color : 3-tuple
        Color tuple.
    """
    table_vertices = np.array([[ dim,  dim, 0],
                               [ dim, -dim, 0],
                               [-dim,  dim, 0],
                               [-dim, -dim, 0]]).astype('float')
    table_tris = np.array([[0, 1, 2], [1, 2, 3]])
    table_mesh = trimesh.Trimesh(table_vertices, table_tris)
    table_mesh.apply_transform(T_table_world.matrix)
    Visualizer3D.mesh(table_mesh, style='surface', smooth=True, color=color)
python
{ "resource": "" }
q15473
Visualizer3D.plot3d
train
def plot3d(points, color=(0.5, 0.5, 0.5), tube_radius=0.005, n_components=30, name=None):
    """Plot a 3d curve through a set of points using tubes.

    Parameters
    ----------
    points : (n,3) float
        A series of 3D points that define a curve in space.
    color : (3,) float
        The color of the tube.
    tube_radius : float
        Radius of tube representing curve.
    n_components : int
        The number of edges in each polygon representing the tube.
    name : str
        A name for the object to be added.
    """
    points = np.asanyarray(points)
    mp = MaterialProperties(
        color = np.array(color),
        k_a = 0.5,
        k_d = 0.3,
        k_s = 0.0,
        alpha = 10.0,
        smooth=True
    )

    # Generate circular polygon
    vec = np.array([0,1]) * tube_radius
    angle = np.pi * 2.0 / n_components
    rotmat = np.array([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)]
    ])
    perim = []
    for i in range(n_components):
        perim.append(vec)
        vec = np.dot(rotmat, vec)
    poly = Polygon(perim)

    # Sweep it out along the path
    mesh = trimesh.creation.sweep_polygon(poly, points)
    obj = SceneObject(mesh, material=mp)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, obj)
python
{ "resource": "" }
q15474
Visualizer2D.figure
train
def figure(size=(8,8), *args, **kwargs):
    """ Creates a figure.

    Parameters
    ----------
    size : 2-tuple
        size of the view window in inches
    args : list
        args of matplotlib figure
    kwargs : list
        keyword args of matplotlib figure

    Returns
    -------
    pyplot figure
        the current figure
    """
    return plt.figure(figsize=size, *args, **kwargs)
python
{ "resource": "" }
q15475
Visualizer2D.show
train
def show(filename=None, *args, **kwargs):
    """ Show the current figure.

    Parameters
    ----------
    filename : :obj:`str`
        filename to save the image to, for auto-saving
    """
    if filename is None:
        plt.show(*args, **kwargs)
    else:
        plt.savefig(filename, *args, **kwargs)
python
{ "resource": "" }
q15476
Visualizer2D.box
train
def box(b, line_width=2, color='g', style='-'):
    """ Draws a box on the current plot.

    Parameters
    ----------
    b : :obj:`autolab_core.Box`
        box to draw
    line_width : int
        width of lines on side of box
    color : :obj:`str`
        color of box
    style : :obj:`str`
        style of lines to draw
    """
    if not isinstance(b, Box):
        raise ValueError('Input must be of type Box')

    # get min pixels
    min_i = b.min_pt[1]
    min_j = b.min_pt[0]
    max_i = b.max_pt[1]
    max_j = b.max_pt[0]
    top_left = np.array([min_i, min_j])
    top_right = np.array([max_i, min_j])
    bottom_left = np.array([min_i, max_j])
    bottom_right = np.array([max_i, max_j])

    # create lines
    left = np.c_[top_left, bottom_left].T
    right = np.c_[top_right, bottom_right].T
    top = np.c_[top_left, top_right].T
    bottom = np.c_[bottom_left, bottom_right].T

    # plot lines
    plt.plot(left[:,0], left[:,1], linewidth=line_width, color=color, linestyle=style)
    plt.plot(right[:,0], right[:,1], linewidth=line_width, color=color, linestyle=style)
    plt.plot(top[:,0], top[:,1], linewidth=line_width, color=color, linestyle=style)
    plt.plot(bottom[:,0], bottom[:,1], linewidth=line_width, color=color, linestyle=style)
python
{ "resource": "" }
q15477
Visualizer2D.contour
train
def contour(c, subsample=1, size=10, color='g'):
    """ Draws a contour on the current plot by scattering points.

    Parameters
    ----------
    c : :obj:`autolab_core.Contour`
        contour to draw
    subsample : int
        subsample rate for boundary pixels
    size : int
        size of scattered points
    color : :obj:`str`
        color of box
    """
    if not isinstance(c, Contour):
        raise ValueError('Input must be of type Contour')

    for i in range(c.num_pixels)[0::subsample]:
        plt.scatter(c.boundary_pixels[i,1], c.boundary_pixels[i,0], s=size, c=color)
python
{ "resource": "" }
q15478
flatten
train
def flatten(in_list):
    """given a list of values in_list, flatten returns the list obtained by
    flattening the top-level elements of in_list."""
    out_list = []
    for val in in_list:
        if isinstance(val, list):
            out_list.extend(val)
        else:
            out_list.append(val)
    return out_list
python
{ "resource": "" }
q15479
create_parameterized_CAG
train
def create_parameterized_CAG(input, output, filename="CAG_with_indicators_and_values.pdf"):
    """ Create a CAG with mapped and parameterized indicators """
    with open(input, "rb") as f:
        G = pickle.load(f)
    G.parameterize(year=2017, month=4)
    G.get_timeseries_values_for_indicators()
    with open(output, "wb") as f:
        pickle.dump(G, f)
python
{ "resource": "" }
q15480
get_concepts
train
def get_concepts(sts: List[Influence]) -> Set[str]:
    """ Get a set of all unique concepts in the list of INDRA statements. """
    return set(flatMap(nameTuple, sts))
python
{ "resource": "" }
q15481
get_valid_statements_for_modeling
train
def get_valid_statements_for_modeling(sts: List[Influence]) -> List[Influence]:
    """ Select INDRA statements that can be used to construct a Delphi model
    from a given list of statements. """
    return [
        s
        for s in sts
        if is_grounded_statement(s)
        and (s.subj_delta["polarity"] is not None)
        and (s.obj_delta["polarity"] is not None)
    ]
python
{ "resource": "" }
q15482
is_grounded_to_name
train
def is_grounded_to_name(c: Concept, name: str, cutoff=0.7) -> bool:
    """ Check if a concept is grounded to a given name. """
    return (top_grounding(c) == name) if is_well_grounded(c, cutoff) else False
python
{ "resource": "" }
q15483
contains_relevant_concept
train
def contains_relevant_concept(
    s: Influence, relevant_concepts: List[str], cutoff=0.7
) -> bool:
    """ Returns true if a given Influence statement has a relevant concept, and
    false otherwise. """
    return any(
        map(lambda c: contains_concept(s, c, cutoff=cutoff), relevant_concepts)
    )
python
{ "resource": "" }
q15484
top_grounding
train
def top_grounding(c: Concept) -> str:
    """ Return the top-scoring grounding from the UN ontology. """
    return c.db_refs["UN"][0][0] if "UN" in c.db_refs else c.name
python
{ "resource": "" }
q15485
nameTuple
train
def nameTuple(s: Influence) -> Tuple[str, str]:
    """ Returns a 2-tuple consisting of the top groundings of the subj and obj
    of an Influence statement. """
    return top_grounding(s.subj), top_grounding(s.obj)
python
{ "resource": "" }
q15486
createNewICM
train
def createNewICM():
    """ Create a new ICM """
    data = json.loads(request.data)
    G = AnalysisGraph.from_uncharted_json_serialized_dict(data)
    G.assemble_transition_model_from_gradable_adjectives()
    G.sample_from_prior()
    G.to_sql(app=current_app)
    _metadata = ICMMetadata.query.filter_by(id=G.id).first().deserialize()
    del _metadata["model_id"]
    return jsonify(_metadata)
python
{ "resource": "" }
q15487
getICMByUUID
train
def getICMByUUID(uuid: str):
    """ Fetch an ICM by UUID """
    _metadata = ICMMetadata.query.filter_by(id=uuid).first().deserialize()
    del _metadata["model_id"]
    return jsonify(_metadata)
python
{ "resource": "" }
q15488
deleteICM
train
def deleteICM(uuid: str):
    """ Deletes an ICM """
    _metadata = ICMMetadata.query.filter_by(id=uuid).first()
    db.session.delete(_metadata)
    db.session.commit()
    return ("", 204)
python
{ "resource": "" }
q15489
getExperiment
train
def getExperiment(uuid: str, exp_id: str):
    """ Fetch experiment results """
    experimentResult = ForwardProjectionResult.query.filter_by(
        id=exp_id
    ).first()
    return jsonify(experimentResult.deserialize())
python
{ "resource": "" }
q15490
create_statement_inspection_table
train
def create_statement_inspection_table(sts: List[Influence]):
    """ Display an HTML representation of a table with INDRA statements to
    manually inspect for validity.

    Args:
        sts: A list of INDRA statements to be manually inspected for validity.
    """
    columns = [
        "un_groundings",
        "subj_polarity",
        "obj_polarity",
        "Sentence",
        "Source API",
    ]
    polarity_to_str = lambda x: "+" if x == 1 else "-" if x == -1 else "None"
    l = []
    for s in sts:
        subj_un_grounding = s.subj.db_refs["UN"][0][0].split("/")[-1]
        obj_un_grounding = s.obj.db_refs["UN"][0][0].split("/")[-1]
        subj_polarity = s.subj_delta["polarity"]
        obj_polarity = s.obj_delta["polarity"]
        subj_adjectives = s.subj_delta["adjectives"]
        for e in s.evidence:
            l.append(
                (
                    (subj_un_grounding, obj_un_grounding),
                    subj_polarity,
                    obj_polarity,
                    e.text,
                    e.source_api,
                )
            )

    df = pd.DataFrame(l, columns=columns)
    df = df.pivot_table(index=["un_groundings", "Source API", "Sentence"])

    def hover(hover_color="#ffff99"):
        return dict(
            selector="tr:hover",
            props=[("background-color", "%s" % hover_color)],
        )

    styles = [
        hover(),
        dict(props=[("font-size", "100%"), ("font-family", "Gill Sans")]),
    ]
    return df.style.set_table_styles(styles)
python
{ "resource": "" }
q15491
get_python_shell
train
def get_python_shell():
    """Determine python shell

    get_python_shell() returns

    'shell' (started python on command line using "python")
    'ipython' (started ipython on command line using "ipython")
    'ipython-notebook' (e.g., running in Spyder or started with "ipython qtconsole")
    'jupyter-notebook' (running in a Jupyter notebook)

    See also https://stackoverflow.com/a/37661854
    """
    env = os.environ
    shell = "shell"
    program = os.path.basename(env["_"])

    if "jupyter-notebook" in program:
        shell = "jupyter-notebook"
    elif "JPY_PARENT_PID" in env or "ipython" in program:
        shell = "ipython"
        if "JPY_PARENT_PID" in env:
            shell = "ipython-notebook"

    return shell
python
{ "resource": "" }
q15492
create_precipitation_centered_CAG
train
def create_precipitation_centered_CAG(input, output):
    """ Get a CAG that examines the downstream effects of changes in precipitation. """
    with open(input, "rb") as f:
        G = pickle.load(f)
    G = G.get_subgraph_for_concept(
        "UN/events/weather/precipitation", depth=2, reverse=False
    )
    G.prune(cutoff=2)

    # Manually correcting a bad CWMS extraction
    G.edges[
        "UN/events/weather/precipitation",
        "UN/entities/human/infrastructure/transportation/road",
    ]["InfluenceStatements"][0].obj_delta["polarity"] = -1

    with open(output, "wb") as f:
        pickle.dump(G, f)
python
{ "resource": "" }
q15493
index_modules
train
def index_modules(root) -> Dict:
    """ Counts the number of modules in the Fortran file including the program
    file. Each module is written out into a separate Python file. """
    module_index_dict = {
        node["name"]: (node.get("tag"), index)
        for index, node in enumerate(root)
        if node.get("tag") in ("module", "program", "subroutine")
    }
    return module_index_dict
python
{ "resource": "" }
q15494
draw_graph
train
def draw_graph(G: nx.DiGraph, filename: str):
    """ Draw a networkx graph with Pygraphviz. """
    A = to_agraph(G)
    A.graph_attr["rankdir"] = "LR"
    A.draw(filename, prog="dot")
python
{ "resource": "" }
q15495
get_input_nodes
train
def get_input_nodes(G: nx.DiGraph) -> List[str]:
    """ Get all input nodes from a network. """
    return [n for n, d in G.in_degree() if d == 0]
python
{ "resource": "" }
q15496
get_output_nodes
train
def get_output_nodes(G: nx.DiGraph) -> List[str]:
    """ Get all output nodes from a network. """
    return [n for n, d in G.out_degree() if d == 0]
python
{ "resource": "" }
q15497
nx_graph_from_dotfile
train
def nx_graph_from_dotfile(filename: str) -> nx.DiGraph:
    """ Get a networkx graph from a DOT file, and reverse the edges. """
    return nx.DiGraph(read_dot(filename).reverse())
python
{ "resource": "" }
q15498
to_dotfile
train
def to_dotfile(G: nx.DiGraph, filename: str):
    """ Output a networkx graph to a DOT file. """
    A = to_agraph(G)
    A.write(filename)
python
{ "resource": "" }
q15499
get_shared_nodes
train
def get_shared_nodes(G1: nx.DiGraph, G2: nx.DiGraph) -> List[str]:
    """Get all the nodes that are common to both networks."""
    return list(set(G1.nodes()).intersection(set(G2.nodes())))
python
{ "resource": "" }