Dataset columns:
_id: string (2-7 chars)
title: string (1-88 chars)
partition: string (3 classes)
text: string (75-19.8k chars)
language: string (1 class)
meta_information: dict
q6400
TdsTypeInferrer.from_value
train
def from_value(self, value):
    """
    Function infers TDS type from Python value.

    :param value: value from which to infer TDS type
    :return: An instance of subclass of :class:`BaseType`
    """
    if value is None:
        sql_type = NVarCharType(size=1)
    else:
        sql_type = self._from_class_value(value, type(value))
    return sql_type
python
{ "resource": "" }
q6401
dict_row_strategy
train
def dict_row_strategy(column_names):
    """ Dict row strategy, rows returned as dictionaries """
    # replace empty column names with indices
    column_names = [(name or idx) for idx, name in enumerate(column_names)]

    def row_factory(row):
        return dict(zip(column_names, row))

    return row_factory
python
{ "resource": "" }
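Note: a quick doctest-style illustration of the factory above (the column names and row tuple are invented for the example; an empty name falls back to its positional index):

>>> factory = dict_row_strategy(['id', '', 'total'])
>>> factory((7, 'x', 9.5))
{'id': 7, 1: 'x', 'total': 9.5}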
q6402
namedtuple_row_strategy
train
def namedtuple_row_strategy(column_names):
    """ Namedtuple row strategy, rows returned as named tuples

    Column names that are not valid Python identifiers will be replaced
    with col<number>_
    """
    import collections
    # replace empty column names with placeholders
    column_names = [name if is_valid_identifier(name) else 'col%s_' % idx
                    for idx, name in enumerate(column_names)]
    row_class = collections.namedtuple('Row', column_names)

    def row_factory(row):
        return row_class(*row)

    return row_factory
python
{ "resource": "" }
q6403
recordtype_row_strategy
train
def recordtype_row_strategy(column_names):
    """ Recordtype row strategy, rows returned as recordtypes

    Column names that are not valid Python identifiers will be replaced
    with col<number>_
    """
    try:
        from namedlist import namedlist as recordtype  # optional dependency
    except ImportError:
        from recordtype import recordtype  # optional dependency
    # replace empty column names with placeholders
    column_names = [name if is_valid_identifier(name) else 'col%s_' % idx
                    for idx, name in enumerate(column_names)]
    recordtype_row_class = recordtype('Row', column_names)

    # custom extension class that supports indexing
    class Row(recordtype_row_class):
        def __getitem__(self, index):
            if isinstance(index, slice):
                return tuple(getattr(self, x) for x in self.__slots__[index])
            return getattr(self, self.__slots__[index])

        def __setitem__(self, index, value):
            setattr(self, self.__slots__[index], value)

    def row_factory(row):
        return Row(*row)

    return row_factory
python
{ "resource": "" }
q6404
_get_servers_deque
train
def _get_servers_deque(servers, database):
    """ Returns deque of servers for given tuple of servers and database name.

    This deque has the active server at the beginning. If the first server is
    not accessible at the moment, the deque will be rotated: the second server
    moves to the first position, the third to the second, and so on, while the
    previously first server moves to the last position. This allows the last
    successful server to be remembered between calls to the connect function.
    """
    key = (servers, database)
    if key not in _servers_deques:
        _servers_deques[key] = deque(servers)
    return _servers_deques[key]
python
{ "resource": "" }
q6405
_parse_connection_string
train
def _parse_connection_string(connstr):
    """ MSSQL style connection string parser

    Returns normalized dictionary of connection string parameters
    """
    res = {}
    for item in connstr.split(';'):
        item = item.strip()
        if not item:
            continue
        key, value = item.split('=', 1)
        key = key.strip().lower().replace(' ', '_')
        value = value.strip()
        res[key] = value
    return res
python
{ "resource": "" }
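Note: a sketch of the normalization this parser performs, on a made-up MSSQL-style string (keys are lower-cased and spaces become underscores):

>>> _parse_connection_string('Server=db1;Initial Catalog=orders;User ID=app')
{'server': 'db1', 'initial_catalog': 'orders', 'user_id': 'app'}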
q6406
Connection.commit
train
def commit(self):
    """ Commit transaction which is currently in progress. """
    self._assert_open()
    if self._autocommit:
        return
    if not self._conn.tds72_transaction:
        return
    self._main_cursor._commit(cont=True, isolation_level=self._isolation_level)
python
{ "resource": "" }
q6407
Connection.cursor
train
def cursor(self):
    """
    Return cursor object that can be used to make queries and fetch
    results from the database.
    """
    self._assert_open()
    if self.mars_enabled:
        in_tran = self._conn.tds72_transaction
        if in_tran and self._dirty:
            try:
                return _MarsCursor(self,
                                   self._conn.create_session(self._tzinfo_factory),
                                   self._tzinfo_factory)
            except (socket.error, OSError) as e:
                self._conn.close()
                raise
        else:
            try:
                return _MarsCursor(self,
                                   self._conn.create_session(self._tzinfo_factory),
                                   self._tzinfo_factory)
            except (socket.error, OSError) as e:
                if e.errno not in (errno.EPIPE, errno.ECONNRESET):
                    raise
                self._conn.close()
            except ClosedConnectionError:
                pass
            # the connection was closed; reopen and retry
            self._assert_open()
            return _MarsCursor(self,
                               self._conn.create_session(self._tzinfo_factory),
                               self._tzinfo_factory)
    else:
        return Cursor(self, self._conn.main_session, self._tzinfo_factory)
python
{ "resource": "" }
q6408
Connection.rollback
train
def rollback(self):
    """ Roll back transaction which is currently in progress. """
    try:
        if self._autocommit:
            return
        if not self._conn or not self._conn.is_connected():
            return
        if not self._conn.tds72_transaction:
            return
        self._main_cursor._rollback(cont=True,
                                    isolation_level=self._isolation_level)
    except socket.error as e:
        if e.errno in (errno.ENETRESET, errno.ECONNRESET, errno.EPIPE):
            return
        self._conn.close()
        raise
    except ClosedConnectionError:
        pass
python
{ "resource": "" }
q6409
Connection.close
train
def close(self):
    """ Close connection to an MS SQL Server.

    This function tries to close the connection and free all memory used.
    It can be called more than once in a row. No exception is raised in
    this case.
    """
    if self._conn:
        if self._pooling:
            _connection_pool.add(self._key, (self._conn, self._main_cursor._session))
        else:
            self._conn.close()
        self._active_cursor = None
        self._main_cursor = None
        self._conn = None
    self._closed = True
python
{ "resource": "" }
q6410
Cursor.get_proc_return_status
train
def get_proc_return_status(self):
    """ Last stored proc result """
    if self._session is None:
        return None
    if not self._session.has_status:
        self._session.find_return_status()
    return self._session.ret_status if self._session.has_status else None
python
{ "resource": "" }
q6411
Cursor.cancel
train
def cancel(self):
    """ Cancel current statement """
    conn = self._assert_open()
    conn._try_activate_cursor(self)
    self._session.cancel_if_pending()
python
{ "resource": "" }
q6412
Cursor.execute
train
def execute(self, operation, params=()):
    """ Execute the query

    :param operation: SQL statement
    :type operation: str
    """
    conn = self._assert_open()
    conn._try_activate_cursor(self)
    self._execute(operation, params)
    # for compatibility with pyodbc
    return self
python
{ "resource": "" }
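Note: because execute returns the cursor itself (mirroring pyodbc, per the comment above), calls can be chained; a hypothetical session:

row = cursor.execute('SELECT 1 AS n').fetchone()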
q6413
Cursor.execute_scalar
train
def execute_scalar(self, query_string, params=None):
    """
    This method sends a query to the MS SQL Server to which this object
    instance is connected, then returns first column of first row from
    result. An exception is raised on failure. If there are pending
    results or rows prior to executing this command, they are silently
    discarded.

    This method accepts Python formatting. Please see execute_query()
    for details.

    This method is useful if you want just a single value, as in:

        ``conn.execute_scalar('SELECT COUNT(*) FROM employees')``

    This method works in the same way as ``iter(conn).next()[0]``.
    Remaining rows, if any, can still be iterated after calling this
    method.
    """
    self.execute(query_string, params)
    row = self.fetchone()
    if not row:
        return None
    return row[0]
python
{ "resource": "" }
q6414
Cursor.fetchone
train
def fetchone(self):
    """ Fetches next row, or ``None`` if there are no more rows """
    row = self._session.fetchone()
    if row:
        return self._row_factory(row)
python
{ "resource": "" }
q6415
JobDirectory.calculate_path
train
def calculate_path(self, remote_path, input_type):
    """ Verify remote_path is in directory for input_type inputs
    and create directory if needed.
    """
    directory, allow_nested_files = self._directory_for_file_type(input_type)
    path = get_mapped_file(directory, remote_path, allow_nested_files=allow_nested_files)
    return path
python
{ "resource": "" }
q6416
BaseJobClient.setup
train
def setup(self, tool_id=None, tool_version=None, preserve_galaxy_python_environment=None):
    """
    Setup remote Pulsar server to run this job.
    """
    setup_args = {"job_id": self.job_id}
    if tool_id:
        setup_args["tool_id"] = tool_id
    if tool_version:
        setup_args["tool_version"] = tool_version
    if preserve_galaxy_python_environment:
        setup_args["preserve_galaxy_python_environment"] = preserve_galaxy_python_environment
    return self.setup_handler.setup(**setup_args)
python
{ "resource": "" }
q6417
JobClient.launch
train
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
    """
    Queue up the execution of the supplied `command_line` on the remote
    server. Called launch for historical reasons, should be renamed to
    enqueue or something like that.

    **Parameters**

    command_line : str
        Command to execute.
    """
    launch_params = dict(command_line=command_line, job_id=self.job_id)
    submit_params_dict = submit_params(self.destination_params)
    if submit_params_dict:
        launch_params['params'] = json_dumps(submit_params_dict)
    if dependencies_description:
        launch_params['dependencies_description'] = json_dumps(dependencies_description.to_dict())
    if env:
        launch_params['env'] = json_dumps(env)
    if remote_staging:
        launch_params['remote_staging'] = json_dumps(remote_staging)
    if job_config and 'touch_outputs' in job_config:
        # message clients pass the entire job config
        launch_params['submit_extras'] = json_dumps({'touch_outputs': job_config['touch_outputs']})
    if job_config and self.setup_handler.local:
        # Setup not yet called, job properties were inferred from
        # destination arguments. Hence, must have Pulsar setup job
        # before queueing.
        setup_params = _setup_params_from_job_config(job_config)
        launch_params['setup_params'] = json_dumps(setup_params)
    return self._raw_execute("submit", launch_params)
python
{ "resource": "" }
q6418
FileActionMapper.__process_action
train
def __process_action(self, action, file_type):
    """ Extension point to populate extra action information after an
    action has been created.
    """
    if getattr(action, "inject_url", False):
        self.__inject_url(action, file_type)
    if getattr(action, "inject_ssh_properties", False):
        self.__inject_ssh_properties(action)
python
{ "resource": "" }
q6419
_handle_default
train
def _handle_default(value, script_name):
    """ There are two potential variants of these scripts, the Bash scripts
    that are meant to be run within PULSAR_ROOT for older-style installs and
    the binaries created by setup.py as part of a proper pulsar installation.

    This method first looks for the newer style variant of these scripts and
    returns the full path to them if needed and falls back to the bash
    scripts if these cannot be found.
    """
    if value:
        return value

    installed_script = which("pulsar-%s" % script_name.replace("_", "-"))
    if installed_script:
        return installed_script
    else:
        return "scripts/%s.bash" % script_name
python
{ "resource": "" }
q6420
JobInputs.rewrite_paths
train
def rewrite_paths(self, local_path, remote_path):
    """ Rewrite references to `local_path` with `remote_path` in job inputs. """
    self.__rewrite_command_line(local_path, remote_path)
    self.__rewrite_config_files(local_path, remote_path)
python
{ "resource": "" }
q6421
TransferTracker.rewrite_input_paths
train
def rewrite_input_paths(self):
    """
    For each file that has been transferred and renamed, update
    command_line and configfiles to reflect that rewrite.
    """
    for local_path, remote_path in self.file_renames.items():
        self.job_inputs.rewrite_paths(local_path, remote_path)
python
{ "resource": "" }
q6422
PulsarExchange.__get_payload
train
def __get_payload(self, uuid, failed):
    """Retry reading a message from the publish_uuid_store once, delete on the second failure."""
    # Caller should have the publish_uuid_store lock
    try:
        return self.publish_uuid_store[uuid]
    except Exception as exc:
        msg = "Failed to load payload from publish store for UUID %s, %s: %s"
        if uuid in failed:
            log.error(msg, uuid, "discarding", str(exc))
            self.__discard_publish_uuid(uuid, failed)
        else:
            log.error(msg, uuid, "will try again", str(exc))
            failed.add(uuid)
    return None
python
{ "resource": "" }
q6423
__job_complete_dict
train
def __job_complete_dict(complete_status, manager, job_id):
    """ Build final dictionary describing completed job for consumption by
    Pulsar client.
    """
    return_code = manager.return_code(job_id)
    if return_code == PULSAR_UNKNOWN_RETURN_CODE:
        return_code = None
    stdout_contents = manager.stdout_contents(job_id).decode("utf-8")
    stderr_contents = manager.stderr_contents(job_id).decode("utf-8")
    job_directory = manager.job_directory(job_id)
    as_dict = dict(
        job_id=job_id,
        complete="true",  # Is this still used or is it legacy?
        status=complete_status,
        returncode=return_code,
        stdout=stdout_contents,
        stderr=stderr_contents,
        working_directory=job_directory.working_directory(),
        metadata_directory=job_directory.metadata_directory(),
        working_directory_contents=job_directory.working_directory_contents(),
        metadata_directory_contents=job_directory.metadata_directory_contents(),
        outputs_directory_contents=job_directory.outputs_directory_contents(),
        system_properties=manager.system_properties(),
        pulsar_version=pulsar_version,
    )
    return as_dict
python
{ "resource": "" }
q6424
submit_job
train
def submit_job(manager, job_config):
    """ Launch new job from specified config. May have been previously 'setup'
    if 'setup_params' in job_config is empty.
    """
    # job_config is raw dictionary from JSON (from MQ or HTTP endpoint).
    job_id = job_config.get('job_id')
    try:
        command_line = job_config.get('command_line')
        setup_params = job_config.get('setup_params', {})
        force_setup = job_config.get('setup')
        remote_staging = job_config.get('remote_staging', {})
        dependencies_description = job_config.get('dependencies_description', None)
        env = job_config.get('env', [])
        submit_params = job_config.get('submit_params', {})
        touch_outputs = job_config.get('touch_outputs', [])
        job_config = None

        if setup_params or force_setup:
            input_job_id = setup_params.get("job_id", job_id)
            tool_id = setup_params.get("tool_id", None)
            tool_version = setup_params.get("tool_version", None)
            use_metadata = setup_params.get("use_metadata", False)
            job_config = setup_job(
                manager,
                input_job_id,
                tool_id,
                tool_version,
                use_metadata,
            )

        if job_config is not None:
            job_directory = job_config["job_directory"]
            jobs_directory = os.path.abspath(os.path.join(job_directory, os.pardir))
            command_line = command_line.replace('__PULSAR_JOBS_DIRECTORY__', jobs_directory)
            # TODO: Handle __PULSAR_JOB_DIRECTORY__ config files, metadata files, etc...

        manager.touch_outputs(job_id, touch_outputs)
        launch_config = {
            "remote_staging": remote_staging,
            "command_line": command_line,
            "dependencies_description": dependencies_description,
            "submit_params": submit_params,
            "env": env,
            "setup_params": setup_params,
        }
        manager.preprocess_and_launch(job_id, launch_config)
    except Exception:
        manager.handle_failure_before_launch(job_id)
        raise
python
{ "resource": "" }
q6425
PulsarApp.__setup_tool_config
train
def __setup_tool_config(self, conf):
    """ Sets up the toolbox object and authorization mechanism based
    on the supplied toolbox_path.
    """
    tool_config_files = conf.get("tool_config_files", None)
    if not tool_config_files:
        # For compatibility with Galaxy, allow the tool_config_file
        # option name.
        tool_config_files = conf.get("tool_config_file", None)
    toolbox = None
    if tool_config_files:
        toolbox = ToolBox(tool_config_files)
    else:
        log.info(NOT_WHITELIST_WARNING)
    self.toolbox = toolbox
    self.authorizer = get_authorizer(toolbox)
python
{ "resource": "" }
q6426
PulsarApp.only_manager
train
def only_manager(self):
    """Convenience accessor for tests and contexts with a sole manager."""
    assert len(self.managers) == 1, MULTIPLE_MANAGERS_MESSAGE
    return list(self.managers.values())[0]
python
{ "resource": "" }
q6427
app_factory
train
def app_factory(global_conf, **local_conf):
    """
    Returns the Pulsar WSGI application.
    """
    configuration_file = global_conf.get("__file__", None)
    webapp = init_webapp(ini_path=configuration_file, local_conf=local_conf)
    return webapp
python
{ "resource": "" }
q6428
check_dependencies
train
def check_dependencies():
    """Make sure virtualenv is in the path."""
    print('Checking dependencies...')
    if not HAS_VIRTUALENV:
        print('Virtual environment not found.')
        # Try installing it via easy_install...
        if HAS_EASY_INSTALL:
            print('Installing virtualenv via easy_install...', end='')
            run_command(['easy_install', 'virtualenv'],
                        die_message='easy_install failed to install virtualenv'
                                    '\ndevelopment requires virtualenv, please'
                                    ' install it using your favorite tool')
            if not run_command(['which', 'virtualenv']):
                die('ERROR: virtualenv not found in path.\n\ndevelopment '
                    ' requires virtualenv, please install it using your'
                    ' favorite package management tool and ensure'
                    ' virtualenv is in your path')
            print('virtualenv installation done.')
        else:
            die('easy_install not found.\n\nInstall easy_install'
                ' (python-setuptools in ubuntu) or virtualenv by hand,'
                ' then rerun.')
    print('dependency check done.')
python
{ "resource": "" }
q6429
ClientManager.get_client
train
def get_client(self, destination_params, job_id, **kwargs):
    """Build a client given specific destination parameters and job_id."""
    destination_params = _parse_destination_params(destination_params)
    destination_params.update(**kwargs)
    job_manager_interface_class = self.job_manager_interface_class
    job_manager_interface_args = dict(destination_params=destination_params,
                                      **self.job_manager_interface_args)
    job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
    return self.client_class(destination_params, job_id, job_manager_interface,
                             **self.extra_client_kwds)
python
{ "resource": "" }
q6430
CliInterface.get_plugins
train
def get_plugins(self, shell_params, job_params):
    """ Return shell and job interface defined by and configured via
    specified params.
    """
    shell = self.get_shell_plugin(shell_params)
    job_interface = self.get_job_interface(job_params)
    return shell, job_interface
python
{ "resource": "" }
q6431
atomicish_move
train
def atomicish_move(source, destination, tmp_suffix="_TMP"):
    """Move source to destination without risk of partial moves.

    >>> from tempfile import mkdtemp
    >>> from os.path import join, exists
    >>> temp_dir = mkdtemp()
    >>> source = join(temp_dir, "the_source")
    >>> destination = join(temp_dir, "the_dest")
    >>> open(source, "wb").write(b"Hello World!")
    >>> assert exists(source)
    >>> assert not exists(destination)
    >>> atomicish_move(source, destination)
    >>> assert not exists(source)
    >>> assert exists(destination)
    """
    destination_dir = os.path.dirname(destination)
    destination_name = os.path.basename(destination)
    temp_destination = os.path.join(destination_dir, "%s%s" % (destination_name, tmp_suffix))
    shutil.move(source, temp_destination)
    os.rename(temp_destination, destination)
python
{ "resource": "" }
q6432
env_to_statement
train
def env_to_statement(env):
    ''' Return the abstraction description of an environment variable definition
    into a statement for shell script.

    >>> env_to_statement(dict(name='X', value='Y'))
    'X="Y"; export X'
    >>> env_to_statement(dict(name='X', value='Y', raw=True))
    'X=Y; export X'
    >>> env_to_statement(dict(name='X', value='"A","B","C"'))
    'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
    >>> env_to_statement(dict(file="Y"))
    '. "Y"'
    >>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
    ". 'RAW $FILE'"
    >>> # Source file takes precedence
    >>> env_to_statement(dict(name='X', value='"A","B","C"', file="S"))
    '. "S"'
    >>> env_to_statement(dict(execute="module load java/1.5.1"))
    'module load java/1.5.1'
    '''
    source_file = env.get('file', None)
    if source_file:
        return '. %s' % __escape(source_file, env)
    execute = env.get('execute', None)
    if execute:
        return execute
    name = env['name']
    value = __escape(env['value'], env)
    return '%s=%s; export %s' % (name, value, name)
python
{ "resource": "" }
q6433
copy_to_temp
train
def copy_to_temp(object):
    """
    Copy file-like object to temp file and return path.
    """
    temp_file = NamedTemporaryFile(delete=False)
    _copy_and_close(object, temp_file)
    return temp_file.name
python
{ "resource": "" }
q6434
build_submit_description
train
def build_submit_description(executable, output, error, user_log, query_params):
    """
    Build up the contents of a condor submit description file.

    >>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul')
    >>> submit_args['query_params'] = dict()
    >>> default_description = build_submit_description(**submit_args)
    >>> assert 'executable = /path/to/script' in default_description
    >>> assert 'output = o' in default_description
    >>> assert 'error = e' in default_description
    >>> assert 'queue' in default_description
    >>> assert 'universe = vanilla' in default_description
    >>> assert 'universe = standard' not in default_description
    >>> submit_args['query_params'] = dict(universe='standard')
    >>> std_description = build_submit_description(**submit_args)
    >>> assert 'universe = vanilla' not in std_description
    >>> assert 'universe = standard' in std_description
    """
    all_query_params = DEFAULT_QUERY_CLASSAD.copy()
    all_query_params.update(query_params)

    submit_description = []
    for key, value in all_query_params.items():
        submit_description.append('%s = %s' % (key, value))
    submit_description.append('executable = ' + executable)
    submit_description.append('output = ' + output)
    submit_description.append('error = ' + error)
    submit_description.append('log = ' + user_log)
    submit_description.append('queue')
    return '\n'.join(submit_description)
python
{ "resource": "" }
q6435
condor_submit
train
def condor_submit(submit_file):
    """
    Submit a condor job described by the given file. Parse an external id
    for the submission or return None and a reason for the failure.
    """
    external_id = None
    try:
        submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
        message, _ = submit.communicate()
        if submit.returncode == 0:
            external_id = parse_external_id(message, type='condor')
        else:
            message = PROBLEM_PARSING_EXTERNAL_ID
    except Exception as e:
        message = str(e)
    return external_id, message
python
{ "resource": "" }
q6436
condor_stop
train
def condor_stop(external_id):
    """
    Stop running condor job and return a failure_message if this fails.
    """
    failure_message = None
    try:
        check_call(('condor_rm', external_id))
    except CalledProcessError:
        failure_message = "condor_rm failed"
    except Exception as e:
        failure_message = "error encountered calling condor_rm: %s" % e
    return failure_message
python
{ "resource": "" }
q6437
LockManager.get_lock
train
def get_lock(self, path):
    """ Get a job lock corresponding to the path - assumes parent
    directory exists but the file itself does not.
    """
    if self.lockfile:
        return self.lockfile.LockFile(path)
    else:
        with self.job_locks_lock:
            if path not in self.job_locks:
                lock = threading.Lock()
                self.job_locks[path] = lock
            else:
                lock = self.job_locks[path]
        return lock
python
{ "resource": "" }
q6438
build
train
def build(client, destination_args):
    """ Build a SetupHandler object for client from destination parameters.
    """
    # Have defined a remote job directory, let's do the setup locally.
    if client.job_directory:
        handler = LocalSetupHandler(client, destination_args)
    else:
        handler = RemoteSetupHandler(client)
    return handler
python
{ "resource": "" }
q6439
DrmaaSession.run_job
train
def run_job(self, **kwds):
    """ Create a DRMAA job template, populate with specified properties,
    run the job, and return the external_job_id.
    """
    template = DrmaaSession.session.createJobTemplate()
    try:
        for key in kwds:
            setattr(template, key, kwds[key])
        with DrmaaSession.session_lock:
            return DrmaaSession.session.runJob(template)
    finally:
        DrmaaSession.session.deleteJobTemplate(template)
python
{ "resource": "" }
q6440
url_to_destination_params
train
def url_to_destination_params(url):
    """Convert a legacy runner URL to a job destination

    >>> params_simple = url_to_destination_params("http://localhost:8913/")
    >>> params_simple["url"]
    'http://localhost:8913/'
    >>> params_simple["private_token"] is None
    True
    >>> advanced_url = "https://1234x@example.com:8914/managers/longqueue"
    >>> params_advanced = url_to_destination_params(advanced_url)
    >>> params_advanced["url"]
    'https://example.com:8914/managers/longqueue/'
    >>> params_advanced["private_token"]
    '1234x'
    >>> runner_url = "pulsar://http://localhost:8913/"
    >>> runner_params = url_to_destination_params(runner_url)
    >>> runner_params['url']
    'http://localhost:8913/'
    """
    if url.startswith("pulsar://"):
        url = url[len("pulsar://"):]

    if not url.endswith("/"):
        url += "/"

    # Check for private token embedded in the URL. A URL of the form
    # https://moo@cow:8913 will try to contact https://cow:8913
    # with a private key of moo
    private_token_format = "https?://(.*)@.*/?"
    private_token_match = match(private_token_format, url)
    private_token = None
    if private_token_match:
        private_token = private_token_match.group(1)
        url = url.replace("%s@" % private_token, '', 1)

    destination_args = {"url": url,
                        "private_token": private_token}
    return destination_args
python
{ "resource": "" }
q6441
ensure_port_cleanup
train
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.

    Does this by connecting to them until they give connection
    refused. Servers should call like::

        import paste.script
        ensure_port_cleanup([80, 443])
    """
    atexit.register(_cleanup_ports, bound_addresses,
                    maxtries=maxtries, sleeptime=sleeptime)
python
{ "resource": "" }
q6442
Command.standard_parser
train
def standard_parser(cls, verbose=True, interactive=False, no_interactive=False,
                    simulate=False, quiet=False, overwrite=False):
    """
    Create a standard ``OptionParser`` instance.

    Typically used like::

        class MyCommand(Command):
            parser = Command.standard_parser()

    Subclasses may redefine ``standard_parser``, so use the
    nearest superclass's class method.
    """
    parser = BoolOptionParser()
    if verbose:
        parser.add_option('-v', '--verbose', action='count', dest='verbose', default=0)
    if quiet:
        parser.add_option('-q', '--quiet', action='count', dest='quiet', default=0)
    if no_interactive:
        parser.add_option('--no-interactive', action="count", dest="no_interactive", default=0)
    if interactive:
        parser.add_option('-i', '--interactive', action='count', dest='interactive', default=0)
    if simulate:
        parser.add_option('-n', '--simulate', action='store_true', dest='simulate', default=False)
    if overwrite:
        parser.add_option('-f', '--overwrite', dest="overwrite", action="store_true",
                          help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
    return parser
python
{ "resource": "" }
q6443
Command.quote_first_command_arg
train
def quote_first_command_arg(self, arg):
    """
    There's a bug in Windows when running an executable that's
    located inside a path with a space in it. This method handles
    that case, or on non-Windows systems or an executable with no
    spaces, it just leaves well enough alone.
    """
    if (sys.platform != 'win32' or ' ' not in arg):
        # Problem does not apply:
        return arg
    try:
        import win32api
    except ImportError:
        raise ValueError(
            "The executable %r contains a space, and in order to "
            "handle this issue you must have the win32api module "
            "installed" % arg)
    arg = win32api.GetShortPathName(arg)
    return arg
python
{ "resource": "" }
q6444
Command.logging_file_config
train
def logging_file_config(self, config_file):
    """
    Setup logging via the logging module's fileConfig function with the
    specified ``config_file``, if applicable.

    ConfigParser defaults are specified for the special ``__file__``
    and ``here`` variables, similar to PasteDeploy config loading.
    """
    parser = ConfigParser.ConfigParser()
    parser.read([config_file])
    if parser.has_section('loggers'):
        config_file = os.path.abspath(config_file)
        fileConfig(config_file, dict(__file__=config_file,
                                     here=os.path.dirname(config_file)))
python
{ "resource": "" }
q6445
finish_job
train
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs):
    """Process for "un-staging" a complete Pulsar job.

    This function is responsible for downloading results from remote
    server and cleaning up Pulsar staging directory (if needed.)
    """
    collection_failure_exceptions = []
    if job_completed_normally:
        output_collector = ClientOutputCollector(client)
        action_mapper = FileActionMapper(client)
        results_stager = ResultsCollector(output_collector, action_mapper,
                                          client_outputs, pulsar_outputs)
        collection_failure_exceptions = results_stager.collect()
    _clean(collection_failure_exceptions, cleanup_job, client)
    return collection_failure_exceptions
python
{ "resource": "" }
q6446
BaseDrmaaManager.shutdown
train
def shutdown(self, timeout=None):
    """Cleanup DRMAA session and call shutdown of parent."""
    try:
        super(BaseDrmaaManager, self).shutdown(timeout)
    except Exception:
        pass
    self.drmaa_session.close()
python
{ "resource": "" }
q6447
Cache.cache_file
train
def cache_file(self, local_path, ip, path):
    """ Move a file from a temporary staging area into the cache. """
    destination = self.__destination(ip, path)
    atomicish_move(local_path, destination)
python
{ "resource": "" }
q6448
build_managers
train
def build_managers(app, conf):
    """
    Takes in a config file as outlined in job_managers.ini.sample and builds
    a dictionary of job manager objects from them.
    """
    # Load default options from config file that apply to all
    # managers.
    default_options = _get_default_options(conf)
    manager_descriptions = ManagerDescriptions()
    if "job_managers_config" in conf:
        job_managers_config = conf.get("job_managers_config", None)
        _populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config)
    elif "managers" in conf:
        for manager_name, manager_options in conf["managers"].items():
            manager_description = ManagerDescription.from_dict(manager_options, manager_name)
            manager_descriptions.add(manager_description)
    elif "manager" in conf:
        manager_description = ManagerDescription.from_dict(conf["manager"])
        manager_descriptions.add(manager_description)
    else:
        manager_descriptions.add(ManagerDescription())

    manager_classes = _get_managers_dict()
    managers = {}
    for manager_name, manager_description in manager_descriptions.descriptions.items():
        manager_options = dict(default_options)
        manager_options.update(manager_description.manager_options)
        manager_class = manager_classes[manager_description.manager_type]
        manager = _build_manager(manager_class, app, manager_name, manager_options)
        managers[manager_name] = manager
    return managers
python
{ "resource": "" }
q6449
fix_type_error
train
def fix_type_error(exc_info, callable, varargs, kwargs):
    """
    Given an exception, this will test if the exception was due to a
    signature error, and annotate the error with better information if
    so.

    Usage::

      try:
          val = callable(*args, **kw)
      except TypeError:
          exc_info = fix_type_error(None, callable, args, kw)
          raise exc_info[0], exc_info[1], exc_info[2]
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    if (exc_info[0] != TypeError
            or str(exc_info[1]).find('arguments') == -1
            or getattr(exc_info[1], '_type_error_fixed', False)):
        return exc_info
    exc_info[1]._type_error_fixed = True
    argspec = inspect.formatargspec(*inspect.getargspec(callable))
    args = ', '.join(map(_short_repr, varargs))
    if kwargs and args:
        args += ', '
    if kwargs:
        kwargs = kwargs.items()
        kwargs.sort()
        args += ', '.join(['%s=...' % n for n, v in kwargs])
    gotspec = '(%s)' % args
    msg = '%s; got %s, wanted %s' % (exc_info[1], gotspec, argspec)
    exc_info[1].args = (msg,)
    return exc_info
python
{ "resource": "" }
q6450
RemoteJobDirectory.calculate_path
train
def calculate_path(self, remote_relative_path, input_type):
    """ Only for use by the Pulsar client; managers should override to
    enforce security and make the directory if needed.
    """
    directory, allow_nested_files = self._directory_for_file_type(input_type)
    return self.path_helper.remote_join(directory, remote_relative_path)
python
{ "resource": "" }
q6451
StatefulManagerProxy.get_status
train
def get_status(self, job_id):
    """ Compute status using the proxied manager, handle state transitions,
    and track additional state information as needed.
    """
    job_directory = self._proxied_manager.job_directory(job_id)
    with job_directory.lock("status"):
        proxy_status, state_change = self.__proxy_status(job_directory, job_id)

    if state_change == "to_complete":
        self.__deactivate(job_id, proxy_status)
    elif state_change == "to_running":
        self.__state_change_callback(status.RUNNING, job_id)

    return self.__status(job_directory, proxy_status)
python
{ "resource": "" }
q6452
StatefulManagerProxy.__proxy_status
train
def __proxy_status(self, job_directory, job_id):
    """ Determine state with proxied job manager and whether this job needs
    to be marked as deactivated (this occurs when the job first returns a
    complete status from the proxy).
    """
    state_change = None
    if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
        proxy_status = status.FAILED
        job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
        state_change = "to_complete"
    elif not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
        proxy_status = status.PREPROCESSING
    elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
        proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
    else:
        proxy_status = self._proxied_manager.get_status(job_id)
        if proxy_status == status.RUNNING:
            if not job_directory.has_metadata(JOB_METADATA_RUNNING):
                job_directory.store_metadata(JOB_METADATA_RUNNING, True)
                state_change = "to_running"
        elif proxy_status in [status.COMPLETE, status.CANCELLED]:
            job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
            state_change = "to_complete"
    return proxy_status, state_change
python
{ "resource": "" }
q6453
PulsarOutputs.output_extras
train
def output_extras(self, output_file):
    """
    Returns dict mapping local path to remote name.
    """
    output_directory = dirname(output_file)

    def local_path(name):
        return join(output_directory, self.path_helper.local_name(name))

    files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")],
                                      self.path_helper.separator)
    names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
    return dict(map(lambda name: (local_path(name), name), names))
python
{ "resource": "" }
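Note: to make the naming convention concrete, here is the files_directory computation in isolation (the path and separator are invented for the example):

>>> from os.path import basename
>>> output_file = '/work/outputs/dataset_1.dat'
>>> '%s_files%s' % (basename(output_file)[0:-len('.dat')], '/')
'dataset_1_files/'

A remote entry 'dataset_1_files/plot.png' would therefore map to the local path '/work/outputs/dataset_1_files/plot.png'.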
q6454
sudo_popen
train
def sudo_popen(*args, **kwargs):
    """
    Helper method for building and executing a Popen command. This is
    potentially sensitive code, so it should probably be centralized.
    """
    user = kwargs.get("user", None)
    full_command = [SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG]
    if user:
        full_command.extend([SUDO_USER_ARG, user])
    full_command.extend(args)
    log.info("About to execute the following sudo command - [%s]" % ' '.join(full_command))
    p = Popen(full_command, shell=False, stdout=PIPE, stderr=PIPE)
    return p
python
{ "resource": "" }
q6455
LineManager.add_family
train
def add_family(self, major_number):
    """
    Expand to a new release line with given ``major_number``.

    This will flesh out mandatory buckets like ``unreleased_bugfix`` and
    do other necessary bookkeeping.
    """
    # Normally, we have separate buckets for bugfixes vs features
    keys = ['unreleased_bugfix', 'unreleased_feature']
    # But unstable prehistorical releases roll all up into just
    # 'unreleased'
    if major_number == 0 and self.config.releases_unstable_prehistory:
        keys = ['unreleased']
    # Either way, the buckets default to an empty list
    self[major_number] = {key: [] for key in keys}
python
{ "resource": "" }
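Note: a sketch of the buckets this produces, assuming a LineManager-like mapping configured as described above:

manager.add_family(2)
# manager[2] == {'unreleased_bugfix': [], 'unreleased_feature': []}
# With releases_unstable_prehistory on, manager.add_family(0) would
# instead yield manager[0] == {'unreleased': []}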
q6456
parse_changelog
train
def parse_changelog(path, **kwargs):
    """
    Load and parse changelog file from ``path``, returning data structures.

    This function does not alter any files on disk; it is solely for
    introspecting a Releases ``changelog.rst`` and programmatically answering
    questions like "are there any unreleased bugfixes for the 2.3 line?" or
    "what was included in release 1.2.1?".

    For example, answering the above questions is as simple as::

        changelog = parse_changelog("/path/to/changelog")
        print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
        print("Contents of v1.2.1: {}".format(changelog['1.2.1']))

    Aside from the documented arguments, any additional keyword arguments are
    passed unmodified into an internal `get_doctree` call (which then passes
    them to `make_app`).

    :param str path: A relative or absolute file path string.

    :returns:
        A dict whose keys map to lists of ``releases.models.Issue`` objects,
        as follows:

        - Actual releases are full version number keys, such as ``"1.2.1"``
          or ``"2.0.0"``.
        - Unreleased bugs (or bug-like issues; see the Releases docs) are
          stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
        - Unreleased features (or feature-like issues) are found in
          ``"unreleased_N_feature"``, where ``N`` is one of the major release
          families (so, a changelog spanning only 1.x will only have
          ``unreleased_1_feature``, whereas one with 1.x and 2.x releases
          will have ``unreleased_1_feature`` and ``unreleased_2_feature``,
          etc).

    .. versionchanged:: 1.6
        Added support for passing kwargs to `get_doctree`/`make_app`.
    """
    app, doctree = get_doctree(path, **kwargs)
    # Have to semi-reproduce the 'find first bullet list' bit from main code,
    # which is unfortunately side-effect-heavy (thanks to Sphinx plugin
    # design).
    first_list = None
    for node in doctree[0]:
        if isinstance(node, bullet_list):
            first_list = node
            break
    # Initial parse into the structures Releases finds useful internally
    releases, manager = construct_releases(first_list.children, app)
    ret = changelog2dict(releases)
    # Stitch them together into something an end-user would find better:
    # - nuke unreleased_N.N_Y as their contents will be represented in the
    #   per-line buckets
    for key in ret.copy():
        if key.startswith('unreleased'):
            del ret[key]
    for family in manager:
        # - remove unreleased_bugfix, as they are accounted for in the
        #   per-line buckets too. No need to store anywhere.
        manager[family].pop('unreleased_bugfix', None)
        # - bring over each major family's unreleased_feature as
        #   unreleased_N_feature
        unreleased = manager[family].pop('unreleased_feature', None)
        if unreleased is not None:
            ret['unreleased_{}_feature'.format(family)] = unreleased
        # - bring over all per-line buckets from manager (flattening)
        # Here, all that's left in the per-family bucket should be lines,
        # not unreleased_*
        ret.update(manager[family])
    return ret
python
{ "resource": "" }
q6457
get_doctree
train
def get_doctree(path, **kwargs):
    """
    Obtain a Sphinx doctree from the RST file at ``path``.

    Performs no Releases-specific processing; this code would, ideally, be
    in Sphinx itself, but things there are pretty tightly coupled. So we
    wrote this.

    Any additional kwargs are passed unmodified into an internal `make_app`
    call.

    :param str path: A relative or absolute file path string.

    :returns:
        A two-tuple of the generated ``sphinx.application.Sphinx`` app and
        the doctree (a ``docutils.document`` object).

    .. versionchanged:: 1.6
        Added support for passing kwargs to `make_app`.
    """
    root, filename = os.path.split(path)
    docname, _ = os.path.splitext(filename)
    # TODO: this only works for top level changelog files (i.e. ones where
    # their dirname is the project/doc root)
    app = make_app(srcdir=root, **kwargs)
    # Create & init a BuildEnvironment. Mm, tasty side effects.
    app._init_env(freshenv=True)
    env = app.env
    # More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the
    # app obj in BuildEnvironment.update(); modern Sphinx performs that
    # inside Application._init_env() (which we just called above) and so
    # that kwarg is removed from update(). EAFP.
    kwargs = dict(
        config=app.config,
        srcdir=root,
        doctreedir=app.doctreedir,
        app=app,
    )
    try:
        env.update(**kwargs)
    except TypeError:
        # Assume newer Sphinx w/o an app= kwarg
        del kwargs['app']
        env.update(**kwargs)
    # Code taken from sphinx.environment.read_doc; easier to manually call
    # it with a working Environment object, instead of doing more random
    # crap to trick the higher up build system into thinking our single
    # changelog document was "updated".
    env.temp_data['docname'] = docname
    env.app = app
    # NOTE: SphinxStandaloneReader API changed in 1.4 :(
    reader_kwargs = {
        'app': app,
        'parsers': env.config.source_parsers,
    }
    if sphinx.version_info[:2] < (1, 4):
        del reader_kwargs['app']
    # This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
    # domains' roles & so forth. Without this, rendering the doctree lacks
    # almost all Sphinx magic, including things like :ref: and :doc:!
    with sphinx_domains(env):
        try:
            reader = SphinxStandaloneReader(**reader_kwargs)
        except TypeError:
            # If we import from io, this happens automagically, not in API
            del reader_kwargs['parsers']
            reader = SphinxStandaloneReader(**reader_kwargs)
        pub = Publisher(reader=reader,
                        writer=SphinxDummyWriter(),
                        destination_class=NullOutput)
        pub.set_components(None, 'restructuredtext', None)
        pub.process_programmatic_settings(None, env.settings, None)
        # NOTE: docname derived higher up, from our given path
        src_path = env.doc2path(docname)
        source = SphinxFileInput(
            app,
            env,
            source=None,
            source_path=src_path,
            encoding=env.config.source_encoding,
        )
        pub.source = source
        pub.settings._source = src_path
        pub.set_destination(None, None)
        pub.publish()
        return app, pub.document
python
{ "resource": "" }
q6458
load_conf
train
def load_conf(srcdir):
    """
    Load ``conf.py`` from given ``srcdir``.

    :returns: Dictionary derived from the conf module.
    """
    path = os.path.join(srcdir, 'conf.py')
    mylocals = {'__file__': path}
    with open(path) as fd:
        exec(fd.read(), mylocals)
    return mylocals
python
{ "resource": "" }
q6459
make_app
train
def make_app(**kwargs):
    """
    Create a dummy Sphinx app, filling in various hardcoded assumptions.

    For example, Sphinx assumes the existence of various source/dest
    directories, even if you're only calling internals that never generate
    (or sometimes, even read!) on-disk files. This function creates safe
    temp directories for these instances.

    It also neuters Sphinx's internal logging, which otherwise causes
    verbosity in one's own test output and/or debug logs.

    Finally, it does load the given srcdir's ``conf.py``, but only to read
    specific bits like ``extensions`` (if requested); most of it is ignored.

    All args are stored in a single ``**kwargs``. Aside from the params
    listed below (all of which are optional), all kwargs given are turned
    into 'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is
    like setting ``releases_foo = 'bar'`` in ``conf.py``.

    :param str docname:
        Override the document name used (mostly for internal testing).

    :param str srcdir: Sphinx source directory path.

    :param str dstdir: Sphinx dest directory path.

    :param str doctreedir: Sphinx doctree directory path.

    :param bool load_extensions:
        Whether to load the real ``conf.py`` and setup any extensions it
        configures. Default: ``False``.

    :returns: A Sphinx ``Application`` instance.

    .. versionchanged:: 1.6
        Added the ``load_extensions`` kwarg.
    """
    srcdir = kwargs.pop('srcdir', mkdtemp())
    dstdir = kwargs.pop('dstdir', mkdtemp())
    doctreedir = kwargs.pop('doctreedir', mkdtemp())
    load_extensions = kwargs.pop('load_extensions', False)
    real_conf = None
    try:
        # Sphinx <1.6ish
        Sphinx._log = lambda self, message, wfile, nonl=False: None
        # Sphinx >=1.6ish. Technically still lets Very Bad Things through,
        # unlike the total muting above, but probably OK.
        # NOTE: used to just do 'sphinx' but that stopped working, even on
        # sphinx 1.6.x. Weird. Unsure why hierarchy not functioning.
        for name in ('sphinx', 'sphinx.sphinx.application'):
            logging.getLogger(name).setLevel(logging.ERROR)
        # App API seems to work on all versions so far.
        app = Sphinx(
            srcdir=srcdir,
            confdir=None,
            outdir=dstdir,
            doctreedir=doctreedir,
            buildername='html',
        )
        # Might as well load the conf file here too.
        if load_extensions:
            real_conf = load_conf(srcdir)
    finally:
        for d in (srcdir, dstdir, doctreedir):
            # Only remove empty dirs; non-empty dirs are implicitly something
            # that existed before we ran, and should not be touched.
            try:
                os.rmdir(d)
            except OSError:
                pass
    setup(app)
    # Mock out the config within. More assumptions by Sphinx :(
    # TODO: just use real config and overlay what truly needs changing? is
    # that feasible given the rest of the weird ordering we have to do? If
    # it is, maybe just literally slap this over the return value of
    # load_conf()...
    config = {
        'releases_release_uri': 'foo_%s',
        'releases_issue_uri': 'bar_%s',
        'releases_debug': False,
        'master_doc': 'index',
    }
    # Allow tinkering with document filename
    if 'docname' in kwargs:
        app.env.temp_data['docname'] = kwargs.pop('docname')
    # Allow config overrides via kwargs
    for name in kwargs:
        config['releases_{}'.format(name)] = kwargs[name]
    # Stitch together as the sphinx app init() usually does w/ real conf
    # files
    app.config._raw_config = config
    # init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we
    # seem to be hitting arity errors, give it a dummy such callable.
    # Hopefully calling twice doesn't introduce any wacko state issues :(
    try:
        app.config.init_values()
    except TypeError:  # boy I wish Python had an ArityError or w/e
        app.config.init_values(lambda x: x)
    # Initialize extensions (the internal call to this happens at init time,
    # which of course had no valid config yet here...)
    if load_extensions:
        for extension in real_conf.get('extensions', []):
            # But don't set up ourselves again, that causes errors
            if extension == 'releases':
                continue
            app.setup_extension(extension)
    return app
python
{ "resource": "" }
q6460
_log
train
def _log(txt, config):
    """
    Log debug output if debug setting is on.

    Intended to be partial'd w/ config at top of functions. Meh.
    """
    if config.releases_debug:
        sys.stderr.write(str(txt) + "\n")
        sys.stderr.flush()
python
{ "resource": "" }
q6461
scan_for_spec
train
def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both 'spec' formats are wrapped in parens, discard
    keyword = keyword.lstrip('(').rstrip(')')
    # First, test for intermediate '1.2+' style
    matches = release_line_re.findall(keyword)
    if matches:
        return Spec(">={}".format(matches[0]))
    # Failing that, see if Spec can make sense of it
    try:
        return Spec(keyword)
    # I've only ever seen Spec fail with ValueError.
    except ValueError:
        return None
python
{ "resource": "" }
q6462
append_unreleased_entries
train
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    There's one for each combination of bug-vs-feature & major release
    line. When only one major release line exists, that dimension is
    ignored.
    """
    for family, lines in six.iteritems(manager):
        for type_ in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(type_)
            if bucket not in lines:  # Implies unstable prehistory + 0.x fam
                continue
            issues = lines[bucket]
            fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
            header = "Next {}{} release".format(fam_prefix, type_)
            line = "unreleased_{}.x_{}".format(family, type_)
            releases.append(
                generate_unreleased_entry(header, line, issues, manager, app)
            )
python
{ "resource": "" }
q6463
handle_first_release_line
train
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release:
        manager.add_family(obj.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
python
{ "resource": "" }
q6464
Issue.minor_releases
train
def minor_releases(self, manager):
    """
    Return all minor release line labels found in ``manager``.
    """
    # TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
    # very similar test for "do you have any actual releases yet?"
    # elsewhere. (This may be fodder for changing how we roll up
    # pre-major-release features though...?)
    return [
        key for key, value in six.iteritems(manager)
        if any(x for x in value if not x.startswith('unreleased'))
    ]
python
{ "resource": "" }
q6465
Issue.default_spec
train
def default_spec(self, manager):
    """
    Given the current release-lines structure, return a default Spec.

    Specifics:

    * For feature-like issues, only the highest major release is used, so
      given a ``manager`` with top level keys of ``[1, 2]``, this would
      return ``Spec(">=2")``.

      * When ``releases_always_forwardport_features`` is ``True``, that
        behavior is nullified, and this function always returns the empty
        ``Spec`` (which matches any and all versions/lines).

    * For bugfix-like issues, we only consider major release families which
      have actual releases already.

      * Thus the core difference here is that features are 'consumed' by
        upcoming major releases, and bugfixes are not.

    * When the ``unstable_prehistory`` setting is ``True``, the default
      spec starts at the oldest non-zero release line. (Otherwise, issues
      posted after prehistory ends would try being added to the 0.x part of
      the tree, which makes no sense in unstable-prehistory mode.)
    """
    # TODO: I feel like this + the surrounding bits in add_to_manager()
    # could be consolidated & simplified...
    specstr = ""
    # Make sure truly-default spec skips 0.x if prehistory was unstable.
    stable_families = manager.stable_families
    if manager.config.releases_unstable_prehistory and stable_families:
        specstr = ">={}".format(min(stable_families))
    if self.is_featurelike:
        # TODO: if app->config-><releases_always_forwardport_features or
        # w/e
        if True:
            specstr = ">={}".format(max(manager.keys()))
    else:
        # Can only meaningfully limit to minor release buckets if they
        # actually exist yet.
        buckets = self.minor_releases(manager)
        if buckets:
            specstr = ">={}".format(max(buckets))
    return Spec(specstr) if specstr else Spec()
python
{ "resource": "" }
q6466
Issue.add_to_manager
train
def add_to_manager(self, manager):
    """
    Given a 'manager' structure, add self to one or more of its 'buckets'.
    """
    # Derive version spec allowing us to filter against major/minor buckets
    spec = self.spec or self.default_spec(manager)
    # Only look in appropriate major version/family; if self is an issue
    # declared as living in e.g. >=2, this means we don't even bother
    # looking in the 1.x family.
    families = [Version(str(x)) for x in manager]
    versions = list(spec.filter(families))
    for version in versions:
        family = version.major
        # Within each family, we further limit which bugfix lines match up
        # to what self cares about (ignoring 'unreleased' until later)
        candidates = [
            Version(x)
            for x in manager[family]
            if not x.startswith('unreleased')
        ]
        # Select matching release lines (& stringify)
        buckets = []
        bugfix_buckets = [str(x) for x in spec.filter(candidates)]
        # Add back in unreleased_* as appropriate
        # TODO: probably leverage Issue subclasses for this eventually?
        if self.is_buglike:
            buckets.extend(bugfix_buckets)
            # Don't put into JUST unreleased_bugfix; it implies that this
            # major release/family hasn't actually seen any releases yet
            # and only exists for features to go into.
            if bugfix_buckets:
                buckets.append('unreleased_bugfix')
        # Obtain list of minor releases to check for "haven't had ANY
        # releases yet" corner case, in which case ALL issues get thrown in
        # unreleased_feature for the first release to consume.
        # NOTE: assumes first release is a minor or major one,
        # but...really? why would your first release be a bugfix one??
        no_releases = not self.minor_releases(manager)
        if self.is_featurelike or self.backported or no_releases:
            buckets.append('unreleased_feature')
        # Now that we know which buckets are appropriate, add ourself to
        # all of them. TODO: or just...do it above...instead...
        for bucket in buckets:
            manager[family][bucket].append(self)
python
{ "resource": "" }
q6467
DocBuilder.cleanParagraph
train
def cleanParagraph(self):
    """
    Compress text runs, remove whitespace at start and end,
    skip empty blocks, etc
    """
    runs = self.block.content
    if not runs:
        self.block = None
        return
    if not self.clean_paragraphs:
        return
    joinedRuns = []
    hasContent = False
    for run in runs:
        if run.content[0]:
            hasContent = True
        else:
            continue
        # For whitespace-only groups, remove any property stuff,
        # to avoid extra markup in output
        if not run.content[0].strip():
            run.properties = {}
        # Join runs only if their properties match
        if joinedRuns and (run.properties == joinedRuns[-1].properties):
            joinedRuns[-1].content[0] += run.content[0]
        else:
            joinedRuns.append(run)
    if hasContent:
        # Strip beginning of paragraph
        joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()
        # And then strip the end
        joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()
        self.block.content = joinedRuns
    else:
        self.block = None
python
{ "resource": "" }
q6468
CSS.parse_css
train
def parse_css(self, css):
    """
    Parse a css style sheet into the CSS object.

    For the moment this will only work for very simple css documents.
    It works by using regular expressions to match css syntax.
    This is not bullet proof.
    """
    rulesets = self.ruleset_re.findall(css)
    for (selector, declarations) in rulesets:
        rule = Rule(self.parse_selector(selector))
        rule.properties = self.parse_declarations(declarations)
        self.rules.append(rule)
python
{ "resource": "" }
q6469
CSS.parse_declarations
train
def parse_declarations(self, declarations):
    """ parse a css declaration list """
    declarations = self.declaration_re.findall(declarations)
    return dict(declarations)
python
{ "resource": "" }
q6470
CSS.parse_selector
train
def parse_selector(self, selector):
    """ parse a css selector """
    tag, klass = self.selector_re.match(selector).groups()
    return Selector(tag, klass)
python
{ "resource": "" }
q6471
CSS.get_properties
train
def get_properties(self, node):
    """ return a dict of all the properties of a given BeautifulSoup node
    found by applying the css style.
    """
    ret = {}
    # Try all the rules one by one
    for rule in self.rules:
        if rule.selector(node):
            ret.update(rule.properties)
    # Also search for direct 'style' arguments in the html doc
    for style_node in node.findParents(attrs={'style': True}):
        style = style_node.get('style')
        properties = self.parse_declarations(style)
        ret.update(properties)
    return ret
python
{ "resource": "" }
q6472
namedModule
train
def namedModule(name):
    """Return a module given its name."""
    topLevel = __import__(name)
    packages = name.split(".")[1:]
    m = topLevel
    for p in packages:
        m = getattr(m, p)
    return m
python
{ "resource": "" }
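Note: __import__('os.path') returns the top-level package (os), so the loop above walks the remaining dotted path; a quick check against the standard library:

>>> import os.path
>>> namedModule('os.path') is os.path
True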
q6473
namedObject
train
def namedObject(name):
    """Get a fully named module-global object."""
    classSplit = name.split('.')
    module = namedModule('.'.join(classSplit[:-1]))
    return getattr(module, classSplit[-1])
python
{ "resource": "" }
q6474
RSTWriter.text
train
def text(self, text):
    """
    process a pyth text and return the formatted string
    """
    ret = u"".join(text.content)
    if 'url' in text.properties:
        return u"`%s`_" % ret
    if 'bold' in text.properties:
        return u"**%s**" % ret
    if 'italic' in text.properties:
        return u"*%s*" % ret
    if 'sub' in text.properties:
        return ur"\ :sub:`%s`\ " % ret
    if 'super' in text.properties:
        return ur"\ :sup:`%s`\ " % ret
    return ret
python
{ "resource": "" }
q6475
RSTWriter.paragraph
train
def paragraph(self, paragraph, prefix=""):
    """
    process a pyth paragraph into the target
    """
    content = []
    for text in paragraph.content:
        content.append(self.text(text))
    content = u"".join(content).encode("utf-8")

    for line in content.split("\n"):
        self.target.write(" " * self.indent)
        self.target.write(prefix)
        self.target.write(line)
        self.target.write("\n")
        if prefix:
            prefix = " "

    # handle the links
    if any('url' in text.properties for text in paragraph.content):
        self.target.write("\n")
        for text in paragraph.content:
            if 'url' in text.properties:
                string = u"".join(text.content)
                url = text.properties['url']
                self.target.write(".. _%s: %s\n" % (string, url))
python
{ "resource": "" }
q6476
RSTWriter.list
train
def list(self, list, prefix=None):
    """
    Process a pyth list into the target
    """
    self.indent += 1
    for (i, entry) in enumerate(list.content):
        for (j, paragraph) in enumerate(entry.content):
            prefix = "- " if j == 0 else " "
            handler = self.paragraphDispatch[paragraph.__class__]
            handler(paragraph, prefix)
        self.target.write("\n")
    self.indent -= 1
python
{ "resource": "" }
q6477
XHTMLReader.format
train
def format(self, soup):
    """format a BeautifulSoup document

    This will transform the block elements content from multi-lines
    text into single line. This allows us to avoid having to deal with
    further text rendering once this step has been done.
    """
    # Remove all the newline characters before a closing tag.
    for node in soup.findAll(text=True):
        if node.rstrip(" ").endswith("\n"):
            node.replaceWith(node.rstrip(" ").rstrip("\n"))
    # Join the block elements lines into a single long line
    for tag in ['p', 'li']:
        for node in soup.findAll(tag):
            text = unicode(node)
            lines = [x.strip() for x in text.splitlines()]
            text = ' '.join(lines)
            node.replaceWith(BeautifulSoup.BeautifulSoup(text))
    soup = BeautifulSoup.BeautifulSoup(unicode(soup))
    # replace all <br/> tags by newline characters
    for node in soup.findAll('br'):
        node.replaceWith("\n")
    soup = BeautifulSoup.BeautifulSoup(unicode(soup))
    return soup
python
{ "resource": "" }
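As a rough illustration of the normalization above (assuming BeautifulSoup 3, which this codebase imports as BeautifulSoup):

    import BeautifulSoup

    # Before format(): a paragraph whose text spans several source lines.
    soup = BeautifulSoup.BeautifulSoup("<p>hello\n   world<br/>bye</p>")
    # After format(), the paragraph would read as the single line
    # u"hello world\nbye": the source newline is joined away, while the
    # <br/> tag is replaced by a real newline character.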
q6478
XHTMLReader.url
train
def url(self, node): """ return the url of a BeautifulSoup node or None if there is no url. """ a_node = node.findParent('a') if not a_node: return None if self.link_callback is None: return a_node.get('href') else: return self.link_callback(a_node.get('href'))
python
{ "resource": "" }
q6479
XHTMLReader.process_text
train
def process_text(self, node): """ Return a pyth Text object from a BeautifulSoup node or None if the text is empty. """ text = node.string.strip() if not text: return # Set all the properties properties=dict() if self.is_bold(node): properties['bold'] = True if self.is_italic(node): properties['italic'] = True if self.url(node): properties['url'] = self.url(node) if self.is_sub(node): properties['sub'] = True if self.is_super(node): properties['super'] = True content=[node.string] return document.Text(properties, content)
python
{ "resource": "" }
q6480
XHTMLReader.process_into
train
def process_into(self, node, obj): """ Process a BeautifulSoup node and fill its elements into a pyth base object. """ if isinstance(node, BeautifulSoup.NavigableString): text = self.process_text(node) if text: obj.append(text) return if node.name == 'p': # add a new paragraph into the pyth object new_obj = document.Paragraph() obj.append(new_obj) obj = new_obj elif node.name == 'ul': # add a new list new_obj = document.List() obj.append(new_obj) obj = new_obj elif node.name == 'li': # add a new list entry new_obj = document.ListEntry() obj.append(new_obj) obj = new_obj for child in node: self.process_into(child, obj)
python
{ "resource": "" }
q6481
_PythBase.append
train
def append(self, item): """ Try to add an item to this element. If the item is of the wrong type, and if this element has a sub-type, then try to create such a sub-type and insert the item into that, instead. This happens recursively, so (in python-markup): L [ u'Foo' ] actually creates: L [ LE [ P [ T [ u'Foo' ] ] ] ] If that doesn't work, raise a TypeError. """ okay = True if not isinstance(item, self.contentType): if hasattr(self.contentType, 'contentType'): try: item = self.contentType(content=[item]) except TypeError: okay = False else: okay = False if not okay: raise TypeError("Wrong content type for %s: %s (%s)" % ( self.__class__.__name__, repr(type(item)), repr(item))) self.content.append(item)
python
{ "resource": "" }
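The recursive wrapping described in the docstring can be seen directly, assuming the pyth document classes accept no-argument construction:

    from pyth import document

    # Appending a bare unicode string to a List wraps it, per the docstring
    # above, as ListEntry -> Paragraph -> Text.
    lst = document.List()
    lst.append(u'Foo')
    entry = lst.content[0]     # a ListEntry
    para = entry.content[0]    # a Paragraph
    text = para.content[0]     # a Text
    assert text.content == [u'Foo']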
q6482
LatexWriter.write
train
def write(klass, document, target=None, stylesheet=""): """ convert a pyth document to a latex document. A stylesheet can be given as a latex document fragment that will be inserted after the headers; this way the default style can be overridden. """ writer = LatexWriter(document, target, stylesheet) return writer.go()
python
{ "resource": "" }
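A usage sketch; the module path and the getvalue() call on the returned target are assumptions based on pyth's usual plugin layout, where writers return a file-like target:

    from pyth.plugins.latex.writer import LatexWriter  # module path assumed

    # 'doc' stands in for a pyth document.Document built elsewhere.
    buf = LatexWriter.write(doc, stylesheet=r"\usepackage{lmodern}")
    print(buf.getvalue())  # rendered latex source (file-like target assumed)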
q6483
LatexWriter.full_stylesheet
train
def full_stylesheet(self): """ Return the style sheet that will ultimately be inserted into the latex document. This is the user-given style sheet plus some additional parts that add the document metadata. """ latex_fragment = r""" \usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref} \hypersetup{ pdftitle={%s}, pdfauthor={%s}, pdfsubject={%s} } """ % (self.document.properties.get("title"), self.document.properties.get("author"), self.document.properties.get("subject")) return latex_fragment + self.stylesheet
python
{ "resource": "" }
q6484
Endpoints.get_stream_url
train
def get_stream_url(self, session_id, stream_id=None): """ this method returns the url to get stream information """ url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream' if stream_id: url = url + '/' + stream_id return url
python
{ "resource": "" }
q6485
Endpoints.force_disconnect_url
train
def force_disconnect_url(self, session_id, connection_id): """ this method returns the force disconnect url endpoint """ url = ( self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/connection/' + connection_id ) return url
python
{ "resource": "" }
q6486
Endpoints.set_archive_layout_url
train
def set_archive_layout_url(self, archive_id): """ this method returns the url to set the archive layout """ url = self.api_url + '/v2/project/' + self.api_key + '/archive/' + archive_id + '/layout' return url
python
{ "resource": "" }
q6487
Endpoints.set_stream_class_lists_url
train
def set_stream_class_lists_url(self, session_id): """ this method returns the url to set the stream class list """ url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream' return url
python
{ "resource": "" }
q6488
Endpoints.broadcast_url
train
def broadcast_url(self, broadcast_id=None, stop=False, layout=False): """ this method returns urls for working with broadcasts """ url = self.api_url + '/v2/project/' + self.api_key + '/broadcast' if broadcast_id: url = url + '/' + broadcast_id if stop: url = url + '/stop' if layout: url = url + '/layout' return url
python
{ "resource": "" }
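Given the fields used above, the broadcast endpoints expand as follows (the Endpoints constructor signature is an assumption):

    endpoints = Endpoints('https://api.opentok.com', 'KEY')  # argument order assumed

    assert endpoints.broadcast_url() == \
        'https://api.opentok.com/v2/project/KEY/broadcast'
    assert endpoints.broadcast_url('B1', stop=True) == \
        'https://api.opentok.com/v2/project/KEY/broadcast/B1/stop'
    assert endpoints.broadcast_url('B1', layout=True) == \
        'https://api.opentok.com/v2/project/KEY/broadcast/B1/layout'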
q6489
Archive.attrs
train
def attrs(self): """ Returns a dictionary of the archive's attributes. """ return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk")
python
{ "resource": "" }
q6490
OpenTok.create_session
train
def create_session(self, location=None, media_mode=MediaModes.relayed, archive_mode=ArchiveModes.manual): """ Creates a new OpenTok session and returns the session ID, which uniquely identifies the session. For example, when using the OpenTok JavaScript library, use the session ID when calling the OT.initSession() method (to initialize an OpenTok session). OpenTok sessions do not expire. However, authentication tokens do expire (see the generateToken() method). Also note that sessions cannot explicitly be destroyed. A session ID string can be up to 255 characters long. Calling this method results in an OpenTokException in the event of an error. Check the error message for details. You can also create a session using the OpenTok `REST API <https://tokbox.com/opentok/api/#session_id_production>`_ or `the OpenTok dashboard <https://dashboard.tokbox.com/projects>`_. :param String media_mode: Determines whether the session will transmit streams using the OpenTok Media Router (MediaMode.routed) or not (MediaMode.relayed). By default, the setting is MediaMode.relayed. With the media_mode property set to MediaMode.relayed, the session will attempt to transmit streams directly between clients. If clients cannot connect due to firewall restrictions, the session uses the OpenTok TURN server to relay audio-video streams. The `OpenTok Media Router <https://tokbox.com/opentok/tutorials/create-session/#media-mode>`_ provides the following benefits: * The OpenTok Media Router can decrease bandwidth usage in multiparty sessions. (When the mediaMode property is set to MediaMode.relayed, each client must send a separate audio-video stream to each client subscribing to it.) * The OpenTok Media Router can improve the quality of the user experience through audio fallback and video recovery (see https://tokbox.com/platform/fallback). With these features, if a client's connectivity degrades to a degree that it does not support video for a stream it's subscribing to, the video is dropped on that client (without affecting other clients), and the client receives audio only. If the client's connectivity improves, the video returns. * The OpenTok Media Router supports the archiving feature, which lets you record, save, and retrieve OpenTok sessions (see http://tokbox.com/platform/archiving). :param String archive_mode: Whether the session is automatically archived (ArchiveModes.always) or not (ArchiveModes.manual). By default, the setting is ArchiveModes.manual, and you must call the start_archive() method of the OpenTok object to start archiving. To archive the session (either automatically or not), you must set the media_mode parameter to MediaModes.routed. :param String location: An IP address that the OpenTok servers will use to situate the session in its global network. If you do not set a location hint, the OpenTok servers will be based on the first client connecting to the session. :rtype: The Session object. The session_id property of the object is the session ID. """ # build options options = {} if not isinstance(media_mode, MediaModes): raise OpenTokException(u('Cannot create session, {0} is not a valid media mode').format(media_mode)) if not isinstance(archive_mode, ArchiveModes): raise OpenTokException(u('Cannot create session, {0} is not a valid archive mode').format(archive_mode)) if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed: raise OpenTokException(u('A session with always archive mode must also have the routed media mode.')) options[u('p2p.preference')] = media_mode.value options[u('archiveMode')] = archive_mode.value if location: # validate IP address try: inet_aton(location) except: raise OpenTokException(u('Cannot create session. Location must be either None or a valid IPv4 address {0}').format(location)) options[u('location')] = location try: response = requests.post(self.endpoints.session_url(), data=options, headers=self.headers(), proxies=self.proxies, timeout=self.timeout) response.encoding = 'utf-8' if response.status_code == 403: raise AuthError('Failed to create session, invalid credentials') if not response.content: raise RequestError() dom = xmldom.parseString(response.content) except Exception as e: raise RequestError('Failed to create session: %s' % str(e)) try: error = dom.getElementsByTagName('error') if error: error = error[0] raise AuthError('Failed to create session (code=%s): %s' % (error.attributes['code'].value, error.firstChild.attributes['message'].value)) session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue return Session(self, session_id, location=location, media_mode=media_mode, archive_mode=archive_mode) except Exception as e: raise OpenTokException('Failed to generate session: %s' % str(e))
python
{ "resource": "" }
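A typical call, using the enums referenced above (credentials are placeholders):

    from opentok import OpenTok, MediaModes, ArchiveModes

    opentok = OpenTok('API_KEY', 'API_SECRET')  # placeholder credentials
    # A routed session that archives automatically, per the rules above.
    session = opentok.create_session(media_mode=MediaModes.routed,
                                     archive_mode=ArchiveModes.always)
    print(session.session_id)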
q6491
OpenTok.start_archive
train
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None): """ Starts archiving an OpenTok session. Clients must be actively connected to the OpenTok session for you to successfully start recording an archive. You can only record one archive at a time for a given session. You can only record archives of sessions that use the OpenTok Media Router (sessions with the media mode set to routed); you cannot archive sessions with the media mode set to relayed. For more information on archiving, see the `OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide. :param String session_id: The session ID of the OpenTok session to archive. :param String name: This is the name of the archive. You can use this name to identify the archive. It is a property of the Archive object, and it is a property of archive-related events in the OpenTok.js library. :param Boolean has_audio: if set to True, an audio track will be included in the archive. has_audio is an optional parameter that is set to True by default. If you set both has_audio and has_video to False, the call to the start_archive() method results in an error. :param Boolean has_video: if set to True, a video track will be included in the archive. has_video is an optional parameter that is set to True by default. :param OutputModes output_mode: Whether all streams in the archive are recorded to a single file (OutputModes.composed, the default) or to individual files (OutputModes.individual). :param String resolution (Optional): The resolution of the archive, either "640x480" (the default) or "1280x720". This parameter only applies to composed archives. If you set this parameter and set the output_mode parameter to OutputModes.individual, the call to the start_archive() method results in an error. :rtype: The Archive object, which includes properties defining the archive, including the archive ID. """ if not isinstance(output_mode, OutputModes): raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode)) if resolution and output_mode == OutputModes.individual: raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.')) payload = {'name': name, 'sessionId': session_id, 'hasAudio': has_audio, 'hasVideo': has_video, 'outputMode': output_mode.value, 'resolution': resolution, } response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout) if response.status_code < 300: return Archive(self, response.json()) elif response.status_code == 403: raise AuthError() elif response.status_code == 400: """ The HTTP response has a 400 status code in the following cases: You do not pass in a session ID or you pass in an invalid session ID. No clients are actively connected to the OpenTok session. You specify an invalid resolution value. The outputMode property is set to "individual" and you set the resolution property (which is not supported in individual stream archives). """ raise RequestError(response.json().get("message")) elif response.status_code == 404: raise NotFoundError("Session not found") elif response.status_code == 409: raise ArchiveError(response.json().get("message")) else: raise RequestError("An unexpected error occurred", response.status_code)
python
{ "resource": "" }
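For example, a named, high-resolution composed archive (continuing the session sketch above; the Archive attribute names printed are assumptions based on the REST response fields):

    from opentok import OutputModes

    archive = opentok.start_archive(session.session_id,
                                    name='weekly standup',
                                    output_mode=OutputModes.composed,
                                    resolution='1280x720')
    print(archive.id, archive.status)  # attribute names assumed from the API response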
q6492
OpenTok.delete_archive
train
def delete_archive(self, archive_id): """ Deletes an OpenTok archive. You can only delete an archive which has a status of "available" or "uploaded". Deleting an archive removes its record from the list of archives. For an "available" archive, it also removes the archive file, making it unavailable for download. :param String archive_id: The archive ID of the archive to be deleted. """ response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout) if response.status_code < 300: pass elif response.status_code == 403: raise AuthError() elif response.status_code == 404: raise NotFoundError("Archive not found") else: raise RequestError("An unexpected error occurred", response.status_code)
python
{ "resource": "" }
q6493
OpenTok.get_archive
train
def get_archive(self, archive_id): """Gets an Archive object for the given archive ID. :param String archive_id: The archive ID. :rtype: The Archive object. """ response = requests.get(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout) if response.status_code < 300: return Archive(self, response.json()) elif response.status_code == 403: raise AuthError() elif response.status_code == 404: raise NotFoundError("Archive not found") else: raise RequestError("An unexpected error occurred", response.status_code)
python
{ "resource": "" }
q6494
OpenTok.get_archives
train
def get_archives(self, offset=None, count=None, session_id=None): """Returns an ArchiveList, which is an array of archives that are completed and in-progress, for your API key. :param int offset: Optional. The index offset of the first archive. 0 is the offset of the most recently started archive. 1 is the offset of the archive that started prior to the most recent archive. If you do not specify an offset, 0 is used. :param int count: Optional. The number of archives to be returned. The maximum number of archives returned is 1000. :param string session_id: Optional. Used to list archives for a specific session ID. :rtype: An ArchiveList object, which is an array of Archive objects. """ params = {} if offset is not None: params['offset'] = offset if count is not None: params['count'] = count if session_id is not None: params['sessionId'] = session_id endpoint = self.endpoints.archive_url() + "?" + urlencode(params) response = requests.get( endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code < 300: return ArchiveList(self, response.json()) elif response.status_code == 403: raise AuthError() elif response.status_code == 404: raise NotFoundError("Archive not found") else: raise RequestError("An unexpected error occurred", response.status_code)
python
{ "resource": "" }
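A paging sketch built on the offset/count semantics above (the ArchiveList.items attribute is an assumption):

    # Walk all archives 50 at a time.
    offset = 0
    while True:
        page = opentok.get_archives(offset=offset, count=50)
        for archive in page.items:  # 'items' attribute assumed on ArchiveList
            print(archive.id)
        if len(page.items) < 50:
            break
        offset += 50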
q6495
OpenTok.signal
train
def signal(self, session_id, payload, connection_id=None): """ Send signals to all participants in an active OpenTok session or to a specific client connected to that session. :param String session_id: The session ID of the OpenTok session that receives the signal :param Dictionary payload: Structure that contains both the type and data fields. These correspond to the type and data parameters passed in the client signal received handlers :param String connection_id: The connection_id parameter is an optional string used to specify the connection ID of a client connected to the session. If you specify this value, the signal is sent to the specified client. Otherwise, the signal is sent to all clients connected to the session """ response = requests.post( self.endpoints.signaling_url(session_id, connection_id), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 204: pass elif response.status_code == 400: raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.') elif response.status_code == 403: raise AuthError('You are not authorized to send the signal. Check your authentication credentials.') elif response.status_code == 404: raise SignalingError('The client specified by the connectionId property is not connected to the session.') elif response.status_code == 413: raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).') else: raise RequestError('An unexpected error occurred', response.status_code)
python
{ "resource": "" }
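For example, broadcasting a chat signal to every client in the session (omit connection_id to reach all connections, per the docstring; session_id is a placeholder):

    payload = {'type': 'chat', 'data': 'hello from the server'}
    opentok.signal(session_id, payload)  # no connection_id: sent to all clients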
q6496
OpenTok.force_disconnect
train
def force_disconnect(self, session_id, connection_id): """ Sends a request to disconnect a client from an OpenTok session :param String session_id: The session ID of the OpenTok session from which the client will be disconnected :param String connection_id: The connection ID of the client that will be disconnected """ endpoint = self.endpoints.force_disconnect_url(session_id, connection_id) response = requests.delete( endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 204: pass elif response.status_code == 400: raise ForceDisconnectError('One of the arguments - sessionId or connectionId - is invalid.') elif response.status_code == 403: raise AuthError('You are not authorized to forceDisconnect, check your authentication credentials.') elif response.status_code == 404: raise ForceDisconnectError('The client specified by the connectionId property is not connected to the session.') else: raise RequestError('An unexpected error occurred', response.status_code)
python
{ "resource": "" }
q6497
OpenTok.set_archive_layout
train
def set_archive_layout(self, archive_id, layout_type, stylesheet=None): """ Use this method to change the layout of videos in an OpenTok archive :param String archive_id: The ID of the archive that will be updated :param String layout_type: The layout type for the archive. Valid values are: 'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation' :param String stylesheet optional: CSS used to style the custom layout. Specify this only if you set the type property to 'custom' """ payload = { 'type': layout_type, } if layout_type == 'custom': if stylesheet is not None: payload['stylesheet'] = stylesheet endpoint = self.endpoints.set_archive_layout_url(archive_id) response = requests.put( endpoint, data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 200: pass elif response.status_code == 400: raise ArchiveError('Invalid request. This response may indicate that the data in your request is invalid JSON. It may also indicate that you passed in invalid layout options.') elif response.status_code == 403: raise AuthError('Authentication error.') else: raise RequestError('OpenTok server error.', response.status_code)
python
{ "resource": "" }
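For instance, switching an archive to a custom layout with a stylesheet (archive_id is a placeholder):

    custom_css = 'stream.focus { width: 100%; height: 100%; }'
    opentok.set_archive_layout(archive_id, 'custom', stylesheet=custom_css)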
q6498
OpenTok.dial
train
def dial(self, session_id, token, sip_uri, options={}): """ Use this method to connect a SIP platform to an OpenTok session. The audio from the end of the SIP call is added to the OpenTok session as an audio-only stream. The OpenTok Media Router mixes audio from other streams in the session and sends the mixed audio to the SIP endpoint :param String session_id: The OpenTok session ID for the SIP call to join :param String token: The OpenTok token to be used for the participant being called :param String sip_uri: The SIP URI to be used as the destination of the SIP call initiated from OpenTok to the SIP platform :param Dictionary options optional: Additional options with the following properties: String 'from': The number or string that will be sent to the final SIP number as the caller Dictionary 'headers': Defines custom headers to be added to the SIP INVITE request initiated from OpenTok to the SIP platform. Each of the custom headers must start with the "X-" prefix, or the call will result in a Bad Request (400) response Dictionary 'auth': Contains the username and password to be used in the SIP INVITE request for HTTP digest authentication, if it is required by the SIP platform For example: 'auth': { 'username': 'username', 'password': 'password' } Boolean 'secure': A Boolean flag that indicates whether the media must be transmitted encrypted (true) or not (false, the default) :rtype: A SipCall object, which contains data of the SIP call: id, connectionId and streamId """ payload = { 'sessionId': session_id, 'token': token, 'sip': { 'uri': sip_uri } } if 'from' in options: payload['sip']['from'] = options['from'] if 'headers' in options: payload['sip']['headers'] = options['headers'] if 'auth' in options: payload['sip']['auth'] = options['auth'] if 'secure' in options: payload['sip']['secure'] = options['secure'] endpoint = self.endpoints.dial_url() response = requests.post( endpoint, data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 200: return SipCall(response.json()) elif response.status_code == 400: raise SipDialError('Invalid request. Invalid session ID.') elif response.status_code == 403: raise AuthError('Authentication error.') elif response.status_code == 404: raise SipDialError('The session does not exist.') elif response.status_code == 409: raise SipDialError( 'You attempted to start a SIP call for a session that ' 'does not use the OpenTok Media Router.') else: raise RequestError('OpenTok server error.', response.status_code)
python
{ "resource": "" }
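A call sketch using the option names documented above (the SIP URI, token, and caller number are placeholders):

    sip_call = opentok.dial(session_id, token,
                            'sip:user@sip.example.com;transport=tls',
                            options={'from': '+15550100',
                                     'headers': {'X-Custom-Header': 'value'},
                                     'secure': True})
    print(sip_call.id, sip_call.connectionId, sip_call.streamId)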
q6499
OpenTok.set_stream_class_lists
train
def set_stream_class_lists(self, session_id, payload): """ Use this method to change layout classes for OpenTok streams. The layout classes define how the streams are displayed in the layout of a composed OpenTok archive :param String session_id: The ID of the session of the streams that will be updated :param List payload: A list defining the class lists to apply to the streams. Each element in the list is a dictionary with two properties: 'id' and 'layoutClassList'. The 'id' property is the stream ID (a String), and the 'layoutClassList' is an array of class names (Strings) to apply to the stream. For example: payload = [ {'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']}, {'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']}, {'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']} ] """ items_payload = {'items': payload} endpoint = self.endpoints.set_stream_class_lists_url(session_id) response = requests.put( endpoint, data=json.dumps(items_payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 200: pass elif response.status_code == 400: raise SetStreamClassError( 'Invalid request. This response may indicate that the data in your request ' 'is invalid JSON. It may also indicate that you passed in invalid layout options.' ) elif response.status_code == 403: raise AuthError('Authentication error.') else: raise RequestError('OpenTok server error.', response.status_code)
python
{ "resource": "" }
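Using the payload shape from the docstring (session_id is a placeholder):

    payload = [
        {'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},
        {'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},
    ]
    opentok.set_stream_class_lists(session_id, payload)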