code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command([WITH_VENV, 'easy_install', 'pip']).strip():
    die("Failed to install pip.")
print 'done.'
print 'Installing distribute in virtualenv...'
pip_install('distribute>=0.6.24')
print 'done.'
def create_virtualenv(venv=VENV)
Creates the virtual environment and installs PIP only into the virtual environment
4.815047
5.071628
0.949409
path = str(dataset_path)  # Use false_path if needed.
action = self.action_mapper.action(path, dataset_path_type)
if action.staging_needed:
    if name is None:
        name = os.path.basename(path)
    remote_directory = self.__remote_directory(dataset_path_type)
    remote_path_rewrite = self.path_helper.remote_join(remote_directory, name)
else:
    # Actions which don't require staging MUST define a path_rewrite
    # method.
    remote_path_rewrite = action.path_rewrite(self.path_helper)
return remote_path_rewrite
def __remote_path_rewrite(self, dataset_path, dataset_path_type, name=None)
Return remote path of this file (if staging is required) else None.
4.793854
4.379113
1.094709
retries = 0
interval_range = __fxrange(interval_start,
                           interval_max + interval_start,
                           interval_step, repeatlast=True)
for retries in count():
    try:
        return fun(*args, **kwargs)
    except catch as exc:
        if max_retries and retries >= max_retries:
            raise
        tts = float(errback(exc, interval_range, retries)
                    if errback else next(interval_range))
        if tts:
            sleep(tts)
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30)
Retry the function over and over until max retries is exceeded.

For each retry we sleep for a while before we try again; this interval is
increased for every retry until the max seconds is reached.

:param fun: The function to try.
:param catch: Exceptions to catch, can be either tuple or a single exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword max_retries: Maximum number of retries before we give up.
    If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between retries.
:keyword interval_step: By how much the interval is increased for each retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
4.628373
5.545827
0.834568
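A minimal usage sketch for the retry helper above, assuming the function and its helpers (``count``, ``sleep``, ``__fxrange``) are importable from the same module; the flaky callable and its exception are made up for illustration:

class TransientError(Exception):
    pass

attempts = []

def flaky():
    # Fails twice, then succeeds on the third attempt.
    attempts.append(1)
    if len(attempts) < 3:
        raise TransientError("not yet")
    return "ok"

# Sleeps 1s after the first failure and 2s after the second, then returns
# "ok"; had max_retries been exceeded, the last exception would propagate.
result = _retry_over_time(flaky, TransientError,
                          max_retries=5, interval_start=1, interval_step=1)
assert result == "ok" and len(attempts) == 3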
destination_params = _parse_destination_params(destination_params)
destination_params.update(**kwargs)
job_manager_interface_class = self.job_manager_interface_class
job_manager_interface_args = dict(destination_params=destination_params,
                                  **self.job_manager_interface_args)
job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
return self.client_class(destination_params, job_id, job_manager_interface,
                         **self.extra_client_kwds)
def get_client(self, destination_params, job_id, **kwargs)
Build a client given specific destination parameters and job_id.
2.595501
2.463196
1.053713
shell = self.get_shell_plugin(shell_params)
job_interface = self.get_job_interface(job_params)
return shell, job_interface
def get_plugins(self, shell_params, job_params)
Return shell and job interface defined by and configured via specified params.
3.18808
2.235772
1.425941
destination_dir = os.path.dirname(destination)
destination_name = os.path.basename(destination)
temp_destination = os.path.join(destination_dir,
                                "%s%s" % (destination_name, tmp_suffix))
shutil.move(source, temp_destination)
os.rename(temp_destination, destination)
def atomicish_move(source, destination, tmp_suffix="_TMP")
Move source to destination without risk of partial moves.

>>> from tempfile import mkdtemp
>>> from os.path import join, exists
>>> temp_dir = mkdtemp()
>>> source = join(temp_dir, "the_source")
>>> destination = join(temp_dir, "the_dest")
>>> open(source, "wb").write(b"Hello World!")
>>> assert exists(source)
>>> assert not exists(destination)
>>> atomicish_move(source, destination)
>>> assert not exists(source)
>>> assert exists(destination)
1.716793
2.006829
0.855476
'''
Return the abstraction description of an environment variable definition
into a statement for shell script.

>>> env_to_statement(dict(name='X', value='Y'))
'X="Y"; export X'
>>> env_to_statement(dict(name='X', value='Y', raw=True))
'X=Y; export X'
>>> env_to_statement(dict(name='X', value='"A","B","C"'))
'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
>>> env_to_statement(dict(file="Y"))
'. "Y"'
>>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
". 'RAW $FILE'"
>>> # Source file takes precedence
>>> env_to_statement(dict(name='X', value='"A","B","C"', file="S"))
'. "S"'
>>> env_to_statement(dict(execute="module load java/1.5.1"))
'module load java/1.5.1'
'''
source_file = env.get('file', None)
if source_file:
    return '. %s' % __escape(source_file, env)
execute = env.get('execute', None)
if execute:
    return execute
name = env['name']
value = __escape(env['value'], env)
return '%s=%s; export %s' % (name, value, name)
def env_to_statement(env)
Return the abstraction description of an environment variable definition
into a statement for shell script.

>>> env_to_statement(dict(name='X', value='Y'))
'X="Y"; export X'
>>> env_to_statement(dict(name='X', value='Y', raw=True))
'X=Y; export X'
>>> env_to_statement(dict(name='X', value='"A","B","C"'))
'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
>>> env_to_statement(dict(file="Y"))
'. "Y"'
>>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
". 'RAW $FILE'"
>>> # Source file takes precedence
>>> env_to_statement(dict(name='X', value='"A","B","C"', file="S"))
'. "S"'
>>> env_to_statement(dict(execute="module load java/1.5.1"))
'module load java/1.5.1'
3.152969
1.343902
2.34613
temp_file = NamedTemporaryFile(delete=False)
_copy_and_close(object, temp_file)
return temp_file.name
def copy_to_temp(object)
Copy file-like object to temp file and return path.
3.450536
2.973157
1.160563
all_query_params = DEFAULT_QUERY_CLASSAD.copy()
all_query_params.update(query_params)
submit_description = []
for key, value in all_query_params.items():
    submit_description.append('%s = %s' % (key, value))
submit_description.append('executable = ' + executable)
submit_description.append('output = ' + output)
submit_description.append('error = ' + error)
submit_description.append('log = ' + user_log)
submit_description.append('queue')
return '\n'.join(submit_description)
def build_submit_description(executable, output, error, user_log, query_params)
Build up the contents of a condor submit description file.

>>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul')
>>> submit_args['query_params'] = dict()
>>> default_description = build_submit_description(**submit_args)
>>> assert 'executable = /path/to/script' in default_description
>>> assert 'output = o' in default_description
>>> assert 'error = e' in default_description
>>> assert 'queue' in default_description
>>> assert 'universe = vanilla' in default_description
>>> assert 'universe = standard' not in default_description
>>> submit_args['query_params'] = dict(universe='standard')
>>> std_description = build_submit_description(**submit_args)
>>> assert 'universe = vanilla' not in std_description
>>> assert 'universe = standard' in std_description
2.174424
2.343961
0.92767
external_id = None
try:
    submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
    message, _ = submit.communicate()
    if submit.returncode == 0:
        external_id = parse_external_id(message, type='condor')
    else:
        message = PROBLEM_PARSING_EXTERNAL_ID
except Exception as e:
    message = str(e)
return external_id, message
def condor_submit(submit_file)
Submit a condor job described by the given file. Parse an external id for the submission or return None and a reason for the failure.
3.237281
2.97809
1.087033
failure_message = None
try:
    check_call(('condor_rm', external_id))
except CalledProcessError:
    failure_message = "condor_rm failed"
except Exception as e:
    # The original left this string as a bare expression; assign it so the
    # failure is actually reported.
    failure_message = "error encountered calling condor_rm: %s" % e
return failure_message
def condor_stop(external_id)
Stop a running condor job and return a failure_message if this fails.
3.928631
3.424098
1.147348
if self.lockfile:
    return self.lockfile.LockFile(path)
else:
    with self.job_locks_lock:
        if path not in self.job_locks:
            lock = threading.Lock()
            self.job_locks[path] = lock
        else:
            lock = self.job_locks[path]
        return lock
def get_lock(self, path)
Get a job lock corresponding to the path - assumes parent directory exists but the file itself does not.
2.571878
2.344326
1.097065
try:
    shutdown_method = self._proxied_manager.shutdown
except AttributeError:
    return
shutdown_method(timeout)
def shutdown(self, timeout=None)
Optional.
6.689794
6.92996
0.965344
# Have defined a remote job directory, let's do the setup locally.
if client.job_directory:
    handler = LocalSetupHandler(client, destination_args)
else:
    handler = RemoteSetupHandler(client)
return handler
def build(client, destination_args)
Build a SetupHandler object for client from destination parameters.
11.408287
9.031449
1.263173
template = DrmaaSession.session.createJobTemplate()
try:
    for key in kwds:
        setattr(template, key, kwds[key])
    with DrmaaSession.session_lock:
        return DrmaaSession.session.runJob(template)
finally:
    DrmaaSession.session.deleteJobTemplate(template)
def run_job(self, **kwds)
Create a DRMAA job template, populate with specified properties, run the job, and return the external_job_id.
2.974736
2.364015
1.258341
if url.startswith("pulsar://"):
    url = url[len("pulsar://"):]
if not url.endswith("/"):
    url += "/"
# Check for private token embedded in the URL. A URL of the form
# https://moo@cow:8913 will try to contact https://cow:8913
# with a private key of moo
private_token_format = "https?://(.*)@.*/?"
private_token_match = match(private_token_format, url)
private_token = None
if private_token_match:
    private_token = private_token_match.group(1)
    url = url.replace("%s@" % private_token, '', 1)
destination_args = {"url": url,
                    "private_token": private_token}
return destination_args
def url_to_destination_params(url)
Convert a legacy runner URL to a job destination.

>>> params_simple = url_to_destination_params("http://localhost:8913/")
>>> params_simple["url"]
'http://localhost:8913/'
>>> params_simple["private_token"] is None
True
>>> advanced_url = "https://1234x@example.com:8914/managers/longqueue"
>>> params_advanced = url_to_destination_params(advanced_url)
>>> params_advanced["url"]
'https://example.com:8914/managers/longqueue/'
>>> params_advanced["private_token"]
'1234x'
>>> runner_url = "pulsar://http://localhost:8913/"
>>> runner_params = url_to_destination_params(runner_url)
>>> runner_params['url']
'http://localhost:8913/'
4.264085
3.632174
1.173976
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries, sleeptime=sleeptime)
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2)
This makes sure any open ports are closed.

Does this by connecting to them until they give connection refused.
Servers should call like::

    import paste.script
    ensure_port_cleanup([80, 443])
3.035508
4.278369
0.709501
parser = BoolOptionParser()
if verbose:
    parser.add_option('-v', '--verbose', action='count', dest='verbose', default=0)
if quiet:
    parser.add_option('-q', '--quiet', action='count', dest='quiet', default=0)
if no_interactive:
    parser.add_option('--no-interactive', action="count", dest="no_interactive", default=0)
if interactive:
    parser.add_option('-i', '--interactive', action='count', dest='interactive', default=0)
if simulate:
    parser.add_option('-n', '--simulate', action='store_true', dest='simulate', default=False)
if overwrite:
    parser.add_option('-f', '--overwrite', dest="overwrite", action="store_true",
                      help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
return parser
def standard_parser(cls, verbose=True, interactive=False, no_interactive=False, simulate=False, quiet=False, overwrite=False)
Create a standard ``OptionParser`` instance.

Typically used like::

    class MyCommand(Command):
        parser = Command.standard_parser()

Subclasses may redefine ``standard_parser``, so use the nearest
superclass's class method.
1.922443
2.042213
0.941353
if (sys.platform != 'win32' or ' ' not in arg):
    # Problem does not apply:
    return arg
try:
    import win32api
except ImportError:
    raise ValueError(
        "The executable %r contains a space, and in order to "
        "handle this issue you must have the win32api module "
        "installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
def quote_first_command_arg(self, arg)
There's a bug in Windows when running an executable that's located inside a path with a space in it. This method handles that case; on non-Windows systems, or for an executable with no spaces, it just leaves well enough alone.
4.711285
4.064234
1.159206
parser = ConfigParser.ConfigParser()
parser.read([config_file])
if parser.has_section('loggers'):
    config_file = os.path.abspath(config_file)
    fileConfig(config_file, dict(__file__=config_file,
                                 here=os.path.dirname(config_file)))
def logging_file_config(self, config_file)
Setup logging via the logging module's fileConfig function with the specified ``config_file``, if applicable. ConfigParser defaults are specified for the special ``__file__`` and ``here`` variables, similar to PasteDeploy config loading.
2.997156
2.282507
1.313099
collection_failure_exceptions = []
if job_completed_normally:
    output_collector = ClientOutputCollector(client)
    action_mapper = FileActionMapper(client)
    results_stager = ResultsCollector(output_collector, action_mapper,
                                      client_outputs, pulsar_outputs)
    collection_failure_exceptions = results_stager.collect()
_clean(collection_failure_exceptions, cleanup_job, client)
return collection_failure_exceptions
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs)
Process for "un-staging" a complete Pulsar job. This function is responsible for downloading results from remote server and cleaning up Pulsar staging directory (if needed.)
4.972898
4.724848
1.052499
source = os.path.abspath(source)
destination = os.path.abspath(destination)
if source != destination:
    if not os.path.exists(os.path.dirname(destination)):
        os.makedirs(os.path.dirname(destination))
    shutil.copyfile(source, destination)
def copy(source, destination)
Copy file from source to destination if needed (skip if source is destination).
1.723889
1.735819
0.993127
try:
    super(BaseDrmaaManager, self).shutdown(timeout)
except Exception:
    pass
self.drmaa_session.close()
def shutdown(self, timeout=None)
Cleanup DRMAA session and call shutdown of parent.
5.659415
3.732976
1.51606
destination = self.__destination(ip, path)
atomicish_move(local_path, destination)
def cache_file(self, local_path, ip, path)
Move a file from a temporary staging area into the cache.
14.113071
11.825396
1.193454
# Load default options from config file that apply to all
# managers.
default_options = _get_default_options(conf)

manager_descriptions = ManagerDescriptions()
if "job_managers_config" in conf:
    job_managers_config = conf.get("job_managers_config", None)
    _populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config)
elif "managers" in conf:
    for manager_name, manager_options in conf["managers"].items():
        manager_description = ManagerDescription.from_dict(manager_options, manager_name)
        manager_descriptions.add(manager_description)
elif "manager" in conf:
    manager_description = ManagerDescription.from_dict(conf["manager"])
    manager_descriptions.add(manager_description)
else:
    manager_descriptions.add(ManagerDescription())

manager_classes = _get_managers_dict()
managers = {}
for manager_name, manager_description in manager_descriptions.descriptions.items():
    manager_options = dict(default_options)
    manager_options.update(manager_description.manager_options)
    manager_class = manager_classes[manager_description.manager_type]
    manager = _build_manager(manager_class, app, manager_name, manager_options)
    managers[manager_name] = manager
return managers
def build_managers(app, conf)
Takes in a config file as outlined in job_managers.ini.sample and builds a dictionary of job manager objects from it.
2.412553
2.287035
1.054883
if exc_info is None:
    exc_info = sys.exc_info()
if (exc_info[0] != TypeError
        or str(exc_info[1]).find('arguments') == -1
        or getattr(exc_info[1], '_type_error_fixed', False)):
    return exc_info
exc_info[1]._type_error_fixed = True
argspec = inspect.formatargspec(*inspect.getargspec(callable))
args = ', '.join(map(_short_repr, varargs))
if kwargs and args:
    args += ', '
if kwargs:
    kwargs = kwargs.items()
    kwargs.sort()
    args += ', '.join(['%s=...' % n for n, v in kwargs])
gotspec = '(%s)' % args
msg = '%s; got %s, wanted %s' % (exc_info[1], gotspec, argspec)
exc_info[1].args = (msg,)
return exc_info
def fix_type_error(exc_info, callable, varargs, kwargs)
Given an exception, this will test if the exception was due to a
signature error, and annotate the error with better information if
so.

Usage::

    try:
        val = callable(*args, **kw)
    except TypeError:
        exc_info = fix_type_error(None, callable, args, kw)
        raise exc_info[0], exc_info[1], exc_info[2]
2.63078
2.703548
0.973084
try:
    val = callable(*args, **kw)
except TypeError:
    exc_info = fix_type_error(None, callable, args, kw)
    reraise(*exc_info)
return val
def fix_call(callable, *args, **kw)
Call ``callable(*args, **kw)`` fixing any type errors that come out.
3.571508
3.607892
0.989915
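A short sketch of ``fix_call`` in action, assuming it, ``fix_type_error``, and their helpers (``reraise``, ``_short_repr``) are in scope; ``add`` is a hypothetical callable and the quoted message reflects Python 2 wording:

def add(a, b):
    return a + b

# Well-formed calls pass straight through.
assert fix_call(add, 1, 2) == 3

try:
    fix_call(add, 1, 2, 3)
except TypeError as e:
    # The usual "add() takes exactly 2 arguments (3 given)" is annotated
    # with something like "; got (1, 2, 3), wanted (a, b)".
    print(e)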
parts, target = spec.split(':') if ':' in spec else (spec, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
    module = getattr(module, part)
return module
def lookup_object(spec)
Looks up a module or object from a some.module:func_name specification. To just look up a module, omit the colon and everything after it.
3.414447
2.997262
1.139188
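A quick sketch of the two spec forms ``lookup_object`` accepts, assuming the function is in scope:

import os.path

# With a colon, look up an attribute of the named module.
join = lookup_object("os.path:join")
assert join is os.path.join

# Without a colon, return the module itself.
assert lookup_object("os.path") is os.path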
if not isinstance(lst, (list, tuple)):
    return [lst]
result = []
for item in lst:
    result.extend(_flatten(item))
return result
def _flatten(lst)
Flatten a nested list.
2.050365
1.871405
1.095629
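A tiny illustration of the helper above, assuming ``_flatten`` is in scope:

assert _flatten([1, [2, (3, 4)], 5]) == [1, 2, 3, 4, 5]
assert _flatten("scalar") == ["scalar"]  # non-list input is wrapped in a list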
defaults = ConfigParser.defaults(self).copy()
for key, val in iteritems(defaults):
    defaults[key] = self.get('DEFAULT', key) or val
return defaults
def defaults(self)
Return the defaults, with their values interpolated (with the defaults dict itself). Mainly to support defaults using values such as %(here)s.
5.451375
4.826343
1.129504
possible = []
for name_options in object_type.config_prefixes:
    for name_prefix in name_options:
        found = self._find_sections(
            self.parser.sections(), name_prefix, name)
        if found:
            possible.extend(found)
            break
if not possible:
    raise LookupError(
        "No section %r (prefixed by %s) found in config %s"
        % (name,
           ' or '.join(map(repr, _flatten(object_type.config_prefixes))),
           self.filename))
if len(possible) > 1:
    raise LookupError(
        "Ambiguous section names %r for section %r (prefixed by %s) "
        "found in config %s"
        % (possible, name,
           ' or '.join(map(repr, _flatten(object_type.config_prefixes))),
           self.filename))
return possible[0]
def find_config_section(self, object_type, name=None)
Return the section name with the given name prefix (following the same pattern as ``protocol_desc`` in ``config``). It must have the given name, or for ``'main'`` an empty name is allowed. The prefix must be followed by a ``:``. Case is *not* ignored.
2.621067
2.701004
0.970405
if name is None:
    name = 'main'
possible = []
for protocol_options in object_type.egg_protocols:
    for protocol in protocol_options:
        pkg_resources.require(self.spec)
        entry = pkg_resources.get_entry_info(
            self.spec, protocol, name)
        if entry is not None:
            possible.append((entry.load(), protocol, entry.name))
            break
if not possible:
    # Better exception
    dist = pkg_resources.get_distribution(self.spec)
    raise LookupError(
        "Entry point %r not found in egg %r (dir: %s; protocols: %s; "
        "entry_points: %s)"
        % (name, self.spec, dist.location,
           ', '.join(_flatten(object_type.egg_protocols)),
           ', '.join(_flatten([
               dictkeys(pkg_resources.get_entry_info(self.spec, prot, name) or {})
               for prot in protocol_options] or '(no entry points)'))))
if len(possible) > 1:
    raise LookupError(
        "Ambiguous entry points for %r in egg %r (protocols: %s)"
        % (name, self.spec, ', '.join(_flatten(protocol_options))))
return possible[0]
def find_egg_entry_point(self, object_type, name=None)
Returns the (entry_point, protocol) pair for the given ``name``.
3.499435
3.421156
1.022881
directory, allow_nested_files = self._directory_for_file_type(input_type)
return self.path_helper.remote_join(directory, remote_relative_path)
def calculate_path(self, remote_relative_path, input_type)
Only for use by the Pulsar client; managers should override this to enforce security and make the directory if needed.
7.189718
6.728343
1.068572
job_directory = self._proxied_manager.job_directory(job_id)
with job_directory.lock("status"):
    proxy_status, state_change = self.__proxy_status(job_directory, job_id)

if state_change == "to_complete":
    self.__deactivate(job_id, proxy_status)
elif state_change == "to_running":
    self.__state_change_callback(status.RUNNING, job_id)

return self.__status(job_directory, proxy_status)
def get_status(self, job_id)
Compute status using the proxied manager, handle state transitions, and track additional state information needed.
5.13112
4.229001
1.213317
state_change = None
if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
    proxy_status = status.FAILED
    job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
    state_change = "to_complete"
elif not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
    proxy_status = status.PREPROCESSING
elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
    proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
else:
    proxy_status = self._proxied_manager.get_status(job_id)
    if proxy_status == status.RUNNING:
        if not job_directory.has_metadata(JOB_METADATA_RUNNING):
            job_directory.store_metadata(JOB_METADATA_RUNNING, True)
            state_change = "to_running"
    elif proxy_status in [status.COMPLETE, status.CANCELLED]:
        job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
        state_change = "to_complete"
return proxy_status, state_change
def __proxy_status(self, job_directory, job_id)
Determine state with the proxied job manager, and whether this job needs to be marked as deactivated (this occurs when the job first returns a complete status from the proxy).
2.404768
2.299679
1.045697
if proxy_status == status.COMPLETE:
    if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
        job_status = status.POSTPROCESSING
    else:
        job_status = status.COMPLETE
else:
    job_status = proxy_status
return job_status
def __status(self, job_directory, proxy_status)
Use proxied manager's status to compute the real (stateful) status of job.
3.699018
3.76537
0.982378
output_directory = dirname(output_file)

def local_path(name):
    return join(output_directory, self.path_helper.local_name(name))

files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")],
                                  self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory),
               self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names))
def output_extras(self, output_file)
Returns dict mapping local path to remote name.
4.518196
3.890561
1.161322
user = kwargs.get("user", None)
full_command = [SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG]
if user:
    full_command.extend([SUDO_USER_ARG, user])
full_command.extend(args)
log.info("About to execute the following sudo command - [%s]" % ' '.join(full_command))
p = Popen(full_command, shell=False, stdout=PIPE, stderr=PIPE)
return p
def sudo_popen(*args, **kwargs)
Helper method for building and executing a Popen command. This is potentially sensitive code, so it should probably be centralized.
3.406592
3.293792
1.034246
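A minimal usage sketch, assuming ``sudo_popen`` and the module-level ``SUDO_*`` constants and ``log`` it relies on are in scope; the ``galaxy`` user is hypothetical:

p = sudo_popen('/usr/bin/id', user='galaxy')
stdout, stderr = p.communicate()
if p.returncode == 0:
    print stdout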
# Normally, we have separate buckets for bugfixes vs features
keys = ['unreleased_bugfix', 'unreleased_feature']
# But unstable prehistorical releases roll all up into just
# 'unreleased'
if major_number == 0 and self.config.releases_unstable_prehistory:
    keys = ['unreleased']
# Either way, the buckets default to an empty list
self[major_number] = {key: [] for key in keys}
def add_family(self, major_number)
Expand to a new release line with given ``major_number``. This will flesh out mandatory buckets like ``unreleased_bugfix`` and do other necessary bookkeeping.
11.469995
8.991807
1.275605
nonzeroes = self.stable_families
# Nothing but 0.x releases -> yup we're prehistory
if not nonzeroes:
    return False
# Presumably, if there's >1 major family besides 0.x, we're at least
# one release into the 1.0 (or w/e) line.
if len(nonzeroes) > 1:
    return True
# If there's only one, we may still be in the space before its N.0.0 as
# well; we can check by testing for existence of bugfix buckets
return any(
    x for x in self[nonzeroes[0]]
    if not x.startswith('unreleased')
)
def has_stable_releases(self)
Returns whether stable (post-0.x) releases seem to exist.
13.416802
11.850894
1.132134
app, doctree = get_doctree(path, **kwargs)
# Have to semi-reproduce the 'find first bullet list' bit from main code,
# which is unfortunately side-effect-heavy (thanks to Sphinx plugin
# design).
first_list = None
for node in doctree[0]:
    if isinstance(node, bullet_list):
        first_list = node
        break
# Initial parse into the structures Releases finds useful internally
releases, manager = construct_releases(first_list.children, app)
ret = changelog2dict(releases)
# Stitch them together into something an end-user would find better:
# - nuke unreleased_N.N_Y as their contents will be represented in the
#   per-line buckets
for key in ret.copy():
    if key.startswith('unreleased'):
        del ret[key]
for family in manager:
    # - remove unreleased_bugfix, as they are accounted for in the per-line
    #   buckets too. No need to store anywhere.
    manager[family].pop('unreleased_bugfix', None)
    # - bring over each major family's unreleased_feature as
    #   unreleased_N_feature
    unreleased = manager[family].pop('unreleased_feature', None)
    if unreleased is not None:
        ret['unreleased_{}_feature'.format(family)] = unreleased
    # - bring over all per-line buckets from manager (flattening)
    # Here, all that's left in the per-family bucket should be lines, not
    # unreleased_*
    ret.update(manager[family])
return ret
def parse_changelog(path, **kwargs)
Load and parse changelog file from ``path``, returning data structures.

This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".

For example, answering the above questions is as simple as::

    changelog = parse_changelog("/path/to/changelog")
    print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
    print("Contents of v1.2.1: {}".format(changelog['1.2.1']))

Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).

:param str path: A relative or absolute file path string.

:returns:
    A dict whose keys map to lists of ``releases.models.Issue`` objects, as
    follows:

    - Actual releases are full version number keys, such as ``"1.2.1"`` or
      ``"2.0.0"``.
    - Unreleased bugs (or bug-like issues; see the Releases docs) are
      stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
    - Unreleased features (or feature-like issues) are found in
      ``"unreleased_N_feature"``, where ``N`` is one of the major release
      families (so, a changelog spanning only 1.x will only have
      ``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
      have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).

.. versionchanged:: 1.6
    Added support for passing kwargs to `get_doctree`/`make_app`.
9.781712
8.245763
1.186271
root, filename = os.path.split(path)
docname, _ = os.path.splitext(filename)
# TODO: this only works for top level changelog files (i.e. ones where
# their dirname is the project/doc root)
app = make_app(srcdir=root, **kwargs)
# Create & init a BuildEnvironment. Mm, tasty side effects.
app._init_env(freshenv=True)
env = app.env
# More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the app
# obj in BuildEnvironment.update(); modern Sphinx performs that inside
# Application._init_env() (which we just called above) and so that kwarg is
# removed from update(). EAFP.
kwargs = dict(
    config=app.config,
    srcdir=root,
    doctreedir=app.doctreedir,
    app=app,
)
try:
    env.update(**kwargs)
except TypeError:
    # Assume newer Sphinx w/o an app= kwarg
    del kwargs['app']
    env.update(**kwargs)
# Code taken from sphinx.environment.read_doc; easier to manually call
# it with a working Environment object, instead of doing more random crap
# to trick the higher up build system into thinking our single changelog
# document was "updated".
env.temp_data['docname'] = docname
env.app = app
# NOTE: SphinxStandaloneReader API changed in 1.4 :(
reader_kwargs = {
    'app': app,
    'parsers': env.config.source_parsers,
}
if sphinx.version_info[:2] < (1, 4):
    del reader_kwargs['app']
# This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
# domains' roles & so forth. Without this, rendering the doctree lacks
# almost all Sphinx magic, including things like :ref: and :doc:!
with sphinx_domains(env):
    try:
        reader = SphinxStandaloneReader(**reader_kwargs)
    except TypeError:
        # If we import from io, this happens automagically, not in API
        del reader_kwargs['parsers']
        reader = SphinxStandaloneReader(**reader_kwargs)
    pub = Publisher(reader=reader,
                    writer=SphinxDummyWriter(),
                    destination_class=NullOutput)
    pub.set_components(None, 'restructuredtext', None)
    pub.process_programmatic_settings(None, env.settings, None)
    # NOTE: docname derived higher up, from our given path
    src_path = env.doc2path(docname)
    source = SphinxFileInput(
        app,
        env,
        source=None,
        source_path=src_path,
        encoding=env.config.source_encoding,
    )
    pub.source = source
    pub.settings._source = src_path
    pub.set_destination(None, None)
    pub.publish()
    return app, pub.document
def get_doctree(path, **kwargs)
Obtain a Sphinx doctree from the RST file at ``path``.

Performs no Releases-specific processing; this code would, ideally, be in
Sphinx itself, but things there are pretty tightly coupled. So we wrote
this.

Any additional kwargs are passed unmodified into an internal `make_app`
call.

:param str path: A relative or absolute file path string.

:returns:
    A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
    doctree (a ``docutils.document`` object).

.. versionchanged:: 1.6
    Added support for passing kwargs to `make_app`.
7.638913
7.545149
1.012427
path = os.path.join(srcdir, 'conf.py')
mylocals = {'__file__': path}
with open(path) as fd:
    exec(fd.read(), mylocals)
return mylocals
def load_conf(srcdir)
Load ``conf.py`` from given ``srcdir``.

:returns: Dictionary derived from the conf module.
2.78579
3.429704
0.812254
if config.releases_debug:
    sys.stderr.write(str(txt) + "\n")
    sys.stderr.flush()
def _log(txt, config)
Log debug output if debug setting is on. Intended to be partial'd w/ config at top of functions. Meh.
5.394867
5.899962
0.91439
# Both 'spec' formats are wrapped in parens, discard
keyword = keyword.lstrip('(').rstrip(')')
# First, test for intermediate '1.2+' style
matches = release_line_re.findall(keyword)
if matches:
    return Spec(">={}".format(matches[0]))
# Failing that, see if Spec can make sense of it
try:
    return Spec(keyword)
# I've only ever seen Spec fail with ValueError.
except ValueError:
    return None
def scan_for_spec(keyword)
Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived.
10.151663
10.714064
0.947508
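A sketch of the keyword forms ``scan_for_spec`` understands, assuming it, ``release_line_re``, and ``Spec`` (from ``semantic_version``) are in scope:

# Intermediate '1.2+' style becomes a >= spec.
assert str(scan_for_spec("(1.2+)")) == ">=1.2"
# Anything Spec itself can parse is passed through.
assert str(scan_for_spec("(>=2.0)")) == ">=2.0"
# Non-spec keywords such as 'backported' yield None.
assert scan_for_spec("backported") is None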
parts = utils.unescape(text).split()
issue_no = parts.pop(0)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
if issue_no not in ('-', '0'):
    ref = None
    if config.releases_issue_uri:
        # TODO: deal with % vs .format()
        ref = config.releases_issue_uri % issue_no
    elif config.releases_github_path:
        ref = "https://github.com/{}/issues/{}".format(
            config.releases_github_path, issue_no)
    # Only generate a reference/link if we were able to make a URI
    if ref:
        identifier = nodes.reference(
            rawtext, '#' + issue_no, refuri=ref, **options
        )
    # Otherwise, just make it regular text
    else:
        identifier = nodes.raw(
            rawtext=rawtext, text='#' + issue_no, format='html', **options
        )
else:
    identifier = None
    issue_no = None  # So it doesn't gum up dupe detection later
# Additional 'new-style changelog' stuff
if name in ISSUE_TYPES:
    nodelist = issue_nodelist(name, identifier)
    spec = None
    keyword = None
    # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
    # instance of specs, etc.
    for part in parts:
        maybe_spec = scan_for_spec(part)
        if maybe_spec:
            spec = maybe_spec
        else:
            if part in ('backported', 'major'):
                keyword = part
            else:
                err = "Gave unknown keyword {!r} for issue no. {}"
                # Report the offending token (the original formatted in
                # 'keyword', which is None or stale at this point).
                raise ValueError(err.format(part, issue_no))
    # Create temporary node w/ data & final nodes to publish
    node = Issue(
        number=issue_no,
        type_=name,
        nodelist=nodelist,
        backported=(keyword == 'backported'),
        major=(keyword == 'major'),
        spec=spec,
    )
    return [node], []
# Return old style info for 'issue' for older changelog entries
else:
    return [identifier], []
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[])
Use: :issue|bug|feature|support:`ticket_number`

When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.

When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.

May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
5.950758
5.505797
1.080817
# Make sure year has been specified
match = year_arg_re.match(text)
if not match:
    msg = inliner.reporter.error("Must specify release date!")
    return [inliner.problematic(rawtext, rawtext, msg)], [msg]
number, date = match.group(1), match.group(2)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
nodelist = [release_nodes(number, number, date, config)]
# Return intermediate node
node = Release(number=number, date=date, nodelist=nodelist)
return [node], []
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[])
Invoked as :release:`N.N.N <YYYY-MM-DD>`. Turns into useful release header + link to GH tree for the tag.
5.564233
5.888581
0.944919
for family, lines in six.iteritems(manager):
    for type_ in ('bugfix', 'feature'):
        bucket = 'unreleased_{}'.format(type_)
        if bucket not in lines:  # Implies unstable prehistory + 0.x fam
            continue
        issues = lines[bucket]
        fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
        header = "Next {}{} release".format(fam_prefix, type_)
        line = "unreleased_{}.x_{}".format(family, type_)
        releases.append(
            generate_unreleased_entry(header, line, issues, manager, app)
        )
def append_unreleased_entries(app, manager, releases)
Generate new abstract 'releases' for unreleased issues. There's one for each combination of bug-vs-feature & major release line. When only one major release line exists, that dimension is ignored.
8.302969
8.357584
0.993465
order = {'feature': 0, 'bug': 1, 'support': 2}
for release in releases:
    entries = release['entries'][:]
    release['entries'] = sorted(entries, key=lambda x: order[x.type])
def reorder_release_entries(releases)
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
3.37888
2.588592
1.305297
# It's remotely possible the changelog is totally empty...
if not entries:
    return
# Obtain (short-circuiting) first Release obj.
first_release = None
for obj in entries:
    if isinstance(obj, Release):
        first_release = obj
        break
# It's also possible it's non-empty but has no releases yet.
if first_release:
    manager.add_family(obj.family)
# If God did not exist, man would be forced to invent him.
else:
    manager.add_family(0)
def handle_first_release_line(entries, manager)
Set up initial line-manager entry for first encountered release line. To be called at start of overall process; afterwards, subsequent major lines are generated by `handle_upcoming_major_release`.
9.571242
9.846697
0.972026
# TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
# very similar test for "do you have any actual releases yet?"
# elsewhere. (This may be fodder for changing how we roll up
# pre-major-release features though...?)
return [
    key for key, value in six.iteritems(manager)
    if any(x for x in value if not x.startswith('unreleased'))
]
def minor_releases(self, manager)
Return all minor release line labels found in ``manager``.
22.908356
21.458511
1.067565
# TODO: I feel like this + the surrounding bits in add_to_manager()
# could be consolidated & simplified...
specstr = ""
# Make sure truly-default spec skips 0.x if prehistory was unstable.
stable_families = manager.stable_families
if manager.config.releases_unstable_prehistory and stable_families:
    specstr = ">={}".format(min(stable_families))
if self.is_featurelike:
    # TODO: if app->config-><releases_always_forwardport_features or
    # w/e
    if True:
        specstr = ">={}".format(max(manager.keys()))
else:
    # Can only meaningfully limit to minor release buckets if they
    # actually exist yet.
    buckets = self.minor_releases(manager)
    if buckets:
        specstr = ">={}".format(max(buckets))
return Spec(specstr) if specstr else Spec()
def default_spec(self, manager)
Given the current release-lines structure, return a default Spec.

Specifics:

* For feature-like issues, only the highest major release is used, so
  given a ``manager`` with top level keys of ``[1, 2]``, this would
  return ``Spec(">=2")``.

  * When ``releases_always_forwardport_features`` is ``True``, that
    behavior is nullified, and this function always returns the empty
    ``Spec`` (which matches any and all versions/lines).

* For bugfix-like issues, we only consider major release families which
  have actual releases already.

  * Thus the core difference here is that features are 'consumed' by
    upcoming major releases, and bugfixes are not.

* When the ``unstable_prehistory`` setting is ``True``, the default spec
  starts at the oldest non-zero release line. (Otherwise, issues posted
  after prehistory ends would try being added to the 0.x part of the
  tree, which makes no sense in unstable-prehistory mode.)
13.320141
8.454879
1.575438
# Derive version spec allowing us to filter against major/minor buckets
spec = self.spec or self.default_spec(manager)
# Only look in appropriate major version/family; if self is an issue
# declared as living in e.g. >=2, this means we don't even bother
# looking in the 1.x family.
families = [Version(str(x)) for x in manager]
versions = list(spec.filter(families))
for version in versions:
    family = version.major
    # Within each family, we further limit which bugfix lines match up
    # to what self cares about (ignoring 'unreleased' until later)
    candidates = [
        Version(x)
        for x in manager[family]
        if not x.startswith('unreleased')
    ]
    # Select matching release lines (& stringify)
    buckets = []
    bugfix_buckets = [str(x) for x in spec.filter(candidates)]
    # Add back in unreleased_* as appropriate
    # TODO: probably leverage Issue subclasses for this eventually?
    if self.is_buglike:
        buckets.extend(bugfix_buckets)
        # Don't put into JUST unreleased_bugfix; it implies that this
        # major release/family hasn't actually seen any releases yet
        # and only exists for features to go into.
        if bugfix_buckets:
            buckets.append('unreleased_bugfix')
    # Obtain list of minor releases to check for "haven't had ANY
    # releases yet" corner case, in which case ALL issues get thrown in
    # unreleased_feature for the first release to consume.
    # NOTE: assumes first release is a minor or major one,
    # but...really? why would your first release be a bugfix one??
    no_releases = not self.minor_releases(manager)
    if self.is_featurelike or self.backported or no_releases:
        buckets.append('unreleased_feature')
    # Now that we know which buckets are appropriate, add ourself to
    # all of them. TODO: or just...do it above...instead...
    for bucket in buckets:
        manager[family][bucket].append(self)
def add_to_manager(self, manager)
Given a 'manager' structure, add self to one or more of its 'buckets'.
12.054793
11.713981
1.029095
reader = Rtf15Reader(source, errors, clean_paragraphs)
return reader.go()
def read(self, source, errors='strict', clean_paragraphs=True)
source: A list of P objects.
8.99828
9.109562
0.987784
runs = self.block.content
if not runs:
    self.block = None
    return
if not self.clean_paragraphs:
    return
joinedRuns = []
hasContent = False
for run in runs:
    if run.content[0]:
        hasContent = True
    else:
        continue
    # For whitespace-only groups, remove any property stuff,
    # to avoid extra markup in output
    if not run.content[0].strip():
        run.properties = {}
    # Join runs only if their properties match
    if joinedRuns and (run.properties == joinedRuns[-1].properties):
        joinedRuns[-1].content[0] += run.content[0]
    else:
        joinedRuns.append(run)
if hasContent:
    # Strip beginning of paragraph
    joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()
    # And then strip the end
    joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()
    self.block.content = joinedRuns
else:
    self.block = None
def cleanParagraph(self)
Compress text runs, remove whitespace at start and end, skip empty blocks, etc
3.71201
3.471352
1.069327
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
    rule = Rule(self.parse_selector(selector))
    rule.properties = self.parse_declarations(declarations)
    self.rules.append(rule)
def parse_css(self, css)
Parse a css style sheet into the CSS object. For the moment this will only work for very simple css documents. It works by using regular expression matching css syntax. This is not bullet proof.
3.176522
3.48911
0.910411
declarations = self.declaration_re.findall(declarations)
return dict(declarations)
def parse_declarations(self, declarations)
parse a css declaration list
7.928678
6.245009
1.269602
tag, klass = self.selector_re.match(selector).groups()
return Selector(tag, klass)
def parse_selector(self, selector)
parse a css selector
6.127836
5.908781
1.037073
ret = {}
# Try all the rules one by one
for rule in self.rules:
    if rule.selector(node):
        ret.update(rule.properties)
# Also search for direct 'style' arguments in the html doc
for style_node in node.findParents(attrs={'style': True}):
    style = style_node.get('style')
    properties = self.parse_declarations(style)
    ret.update(properties)
return ret
def get_properties(self, node)
return a dict of all the properties of a given BeautifulSoup node found by applying the css style.
6.121821
5.155756
1.187376
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
    m = getattr(m, p)
return m
def namedModule(name)
Return a module given its name.
3.177574
3.050545
1.041641
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1])
def namedObject(name)
Get a fully named module-global object.
4.726611
4.758472
0.993304
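A small sketch tying the two helpers above together, assuming both are in scope:

os_path = namedModule("os.path")    # walks getattr from the top-level package
join = namedObject("os.path.join")  # module lookup plus a final getattr
assert join is os_path.join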
ret = u"".join(text.content) if 'url' in text.properties: return u"`%s`_" % ret if 'bold' in text.properties: return u"**%s**" % ret if 'italic' in text.properties: return u"*%s*" % ret if 'sub' in text.properties: return ur"\ :sub:`%s`\ " % ret if 'super' in text.properties: return ur"\ :sup:`%s`\ " % ret return ret
def text(self, text)
process a pyth text and return the formatted string
2.813033
2.74017
1.026591
content = []
for text in paragraph.content:
    content.append(self.text(text))
content = u"".join(content).encode("utf-8")
for line in content.split("\n"):
    self.target.write(" " * self.indent)
    self.target.write(prefix)
    self.target.write(line)
    self.target.write("\n")
    if prefix:
        prefix = " "
# handle the links if any
if any('url' in text.properties for text in paragraph.content):
    self.target.write("\n")
    for text in paragraph.content:
        if 'url' in text.properties:
            string = u"".join(text.content)
            url = text.properties['url']
            self.target.write(".. _%s: %s\n" % (string, url))
def paragraph(self, paragraph, prefix="")
process a pyth paragraph into the target
2.806448
2.733048
1.026857
self.indent += 1
for (i, entry) in enumerate(list.content):
    for (j, paragraph) in enumerate(entry.content):
        prefix = "- " if j == 0 else " "
        handler = self.paragraphDispatch[paragraph.__class__]
        handler(paragraph, prefix)
    self.target.write("\n")
self.indent -= 1
def list(self, list, prefix=None)
Process a pyth list into the target
4.9511
4.682887
1.057275
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
    if node.rstrip(" ").endswith("\n"):
        node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
    for node in soup.findAll(tag):
        text = unicode(node)
        lines = [x.strip() for x in text.splitlines()]
        text = ' '.join(lines)
        node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tag by newline character
for node in soup.findAll('br'):
    node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup
def format(self, soup)
format a BeautifulSoup document This will transform the block elements content from multi-lines text into single line. This allow us to avoid having to deal with further text rendering once this step has been done.
3.406382
3.200675
1.06427
a_node = node.findParent('a')
if not a_node:
    return None
if self.link_callback is None:
    return a_node.get('href')
else:
    return self.link_callback(a_node.get('href'))
def url(self, node)
return the url of a BeautifulSoup node or None if there is no url.
3.183634
2.986357
1.06606
text = node.string.strip()
if not text:
    return
# Set all the properties
properties = dict()
if self.is_bold(node):
    properties['bold'] = True
if self.is_italic(node):
    properties['italic'] = True
if self.url(node):
    properties['url'] = self.url(node)
if self.is_sub(node):
    properties['sub'] = True
if self.is_super(node):
    properties['super'] = True
content = [node.string]
return document.Text(properties, content)
def process_text(self, node)
Return a pyth Text object from a BeautifulSoup node or None if the text is empty.
3.159038
3.014474
1.047957
if isinstance(node, BeautifulSoup.NavigableString):
    text = self.process_text(node)
    if text:
        obj.append(text)
    return
if node.name == 'p':
    # add a new paragraph into the pyth object
    new_obj = document.Paragraph()
    obj.append(new_obj)
    obj = new_obj
elif node.name == 'ul':
    # add a new list
    new_obj = document.List()
    obj.append(new_obj)
    obj = new_obj
elif node.name == 'li':
    # add a new list entry
    new_obj = document.ListEntry()
    obj.append(new_obj)
    obj = new_obj
for child in node:
    self.process_into(child, obj)
def process_into(self, node, obj)
Process a BeautifulSoup node and fill its elements into a pyth base object.
2.315276
2.063983
1.121751
okay = True
if not isinstance(item, self.contentType):
    if hasattr(self.contentType, 'contentType'):
        try:
            item = self.contentType(content=[item])
        except TypeError:
            okay = False
    else:
        okay = False
if not okay:
    raise TypeError("Wrong content type for %s: %s (%s)" % (
        self.__class__.__name__, repr(type(item)), repr(item)))
self.content.append(item)
def append(self, item)
Try to add an item to this element.

If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that,
instead. This happens recursively, so (in python-markup)::

    L [ u'Foo' ]

actually creates::

    L [ LE [ P [ T [ u'Foo' ] ] ] ]

If that doesn't work, raise a TypeError.
3.597485
2.918385
1.232697
class MagicGetItem(type):
    def __new__(mcs, name, bases, dict):
        klass = type.__new__(mcs, name, bases, dict)
        mcs.__getitem__ = lambda _, k: klass()[k]
        return klass

return MagicGetItem
def _MetaPythonBase()
Return a metaclass which implements __getitem__, allowing e.g. P[...] instead of P()[...]
4.580749
3.848659
1.19022
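A rough sketch of what the returned metaclass enables, in the Python 2 style of the surrounding code; the ``P`` element here is hypothetical and only implements enough to demonstrate class-level indexing:

class P(object):
    __metaclass__ = _MetaPythonBase()

    def __init__(self):
        self.content = []

    def __getitem__(self, item):
        self.content.append(item)
        return self

# P[u'Foo'] now means P()[u'Foo']: the metaclass's __getitem__
# instantiates the class and forwards the subscript to the instance.
p = P[u'Foo']
assert p.content == [u'Foo']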
writer = LatexWriter(document, target, stylesheet)
return writer.go()
def write(klass, document, target=None, stylesheet="")
Convert a pyth document to a latex document. We can specify a stylesheet as a latex document fragment that will be inserted after the headers. This way we can override the default style.
14.064671
10.954391
1.28393
latex_fragment = r % (self.document.properties.get("title"),
                      self.document.properties.get("author"),
                      self.document.properties.get("subject"))
return latex_fragment + self.stylesheet
def full_stylesheet(self)
Return the style sheet that will ultimately be inserted into the latex document. This is the user-given style sheet plus some additional parts to add the metadata.
7.021184
6.006565
1.168918
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
if stream_id:
    url = url + '/' + stream_id
return url
def get_stream_url(self, session_id, stream_id=None)
this method returns the url to get stream information
2.798815
2.670093
1.048209
url = (
    self.api_url + '/v2/project/' + self.api_key + '/session/' +
    session_id + '/connection/' + connection_id
)
return url
def force_disconnect_url(self, session_id, connection_id)
this method returns the force disconnect url endpoint
4.019549
3.896175
1.031666
url = self.api_url + '/v2/project/' + self.api_key + '/archive/' + archive_id + '/layout'
return url
def set_archive_layout_url(self, archive_id)
this method returns the url to set the archive layout
4.082083
3.518374
1.160219
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
return url
def set_stream_class_lists_url(self, session_id)
this method returns the url to set the stream class list
4.610386
3.952464
1.166459
url = self.api_url + '/v2/project/' + self.api_key + '/broadcast'
if broadcast_id:
    url = url + '/' + broadcast_id
if stop:
    url = url + '/stop'
if layout:
    url = url + '/layout'
return url
def broadcast_url(self, broadcast_id=None, stop=False, layout=False)
this method returns urls for working with broadcast
2.406615
2.396584
1.004185
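A sketch of the URL shapes ``broadcast_url`` produces, reusing the function above as a method on a stand-in endpoints object; the ``api_url``/``api_key`` values are hypothetical:

class FakeEndpoints(object):
    api_url = 'https://api.opentok.com'
    api_key = 'KEY'
    broadcast_url = broadcast_url  # reuse the function above as a method

e = FakeEndpoints()
assert e.broadcast_url() == 'https://api.opentok.com/v2/project/KEY/broadcast'
assert e.broadcast_url('b1').endswith('/broadcast/b1')
assert e.broadcast_url('b1', stop=True).endswith('/broadcast/b1/stop')
assert e.broadcast_url('b1', layout=True).endswith('/broadcast/b1/layout')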
temp_archive = self.sdk.stop_archive(self.id)
for k, v in iteritems(temp_archive.attrs()):
    setattr(self, k, v)
def stop(self)
Stops an OpenTok archive that is being recorded. Archives automatically stop recording after 120 minutes or when all clients have disconnected from the session being archived.
8.651783
6.809573
1.270532
# Compare with != rather than 'is not': identity comparison against a
# string literal is unreliable.
return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk")
def attrs(self)
Returns a dictionary of the archive's attributes.
7.536469
7.081398
1.064263
if not isinstance(output_mode, OutputModes):
    raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
    raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))

payload = {'name': name,
           'sessionId': session_id,
           'hasAudio': has_audio,
           'hasVideo': has_video,
           'outputMode': output_mode.value,
           'resolution': resolution,
           }

response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload),
                         headers=self.json_headers(), proxies=self.proxies,
                         timeout=self.timeout)

if response.status_code < 300:
    return Archive(self, response.json())
elif response.status_code == 403:
    raise AuthError()
elif response.status_code == 400:
    raise RequestError(response.json().get("message"))
elif response.status_code == 404:
    raise NotFoundError("Session not found")
elif response.status_code == 409:
    raise ArchiveError(response.json().get("message"))
else:
    raise RequestError("An unexpected error occurred", response.status_code)
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None)
Starts archiving an OpenTok session.

Clients must be actively connected to the OpenTok session for you to
successfully start recording an archive.

You can only record one archive at a time for a given session. You can only
record archives of sessions that use the OpenTok Media Router (sessions with
the media mode set to routed); you cannot archive sessions with the media
mode set to relayed.

For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_
programming guide.

:param String session_id: The session ID of the OpenTok session to archive.

:param String name: This is the name of the archive. You can use this name
  to identify the archive. It is a property of the Archive object, and it is
  a property of archive-related events in the OpenTok.js library.

:param Boolean has_audio: if set to True, an audio track will be inserted to
  the archive. has_audio is an optional parameter that is set to True by
  default. If you set both has_audio and has_video to False, the call to the
  start_archive() method results in an error.

:param Boolean has_video: if set to True, a video track will be inserted to
  the archive. has_video is an optional parameter that is set to True by
  default.

:param OutputModes output_mode: Whether all streams in the archive are
  recorded to a single file (OutputModes.composed, the default) or to
  individual files (OutputModes.individual).

:param String resolution (Optional): The resolution of the archive, either
  "640x480" (the default) or "1280x720". This parameter only applies to
  composed archives. If you set this parameter and set the output_mode
  parameter to OutputModes.individual, the call to the start_archive()
  method results in an error.

:rtype: The Archive object, which includes properties defining the archive,
  including the archive ID.
2.467313
2.486338
0.992348
response = requests.post(self.endpoints.archive_url(archive_id) + '/stop',
                         headers=self.json_headers(), proxies=self.proxies,
                         timeout=self.timeout)

if response.status_code < 300:
    return Archive(self, response.json())
elif response.status_code == 403:
    raise AuthError()
elif response.status_code == 404:
    raise NotFoundError("Archive not found")
elif response.status_code == 409:
    raise ArchiveError("Archive is not in started state")
else:
    raise RequestError("An unexpected error occurred", response.status_code)
def stop_archive(self, archive_id)
Stops an OpenTok archive that is being recorded.

Archives automatically stop recording after 120 minutes or when all clients
have disconnected from the session being archived.

@param [String] archive_id The archive ID of the archive you want to stop
  recording.

:rtype: The Archive object corresponding to the archive being stopped.
2.578442
2.668213
0.966355
response = requests.delete(self.endpoints.archive_url(archive_id),
                           headers=self.json_headers(), proxies=self.proxies,
                           timeout=self.timeout)

if response.status_code < 300:
    pass
elif response.status_code == 403:
    raise AuthError()
elif response.status_code == 404:
    raise NotFoundError("Archive not found")
else:
    raise RequestError("An unexpected error occurred", response.status_code)
def delete_archive(self, archive_id)
Deletes an OpenTok archive.

You can only delete an archive which has a status of "available" or
"uploaded". Deleting an archive removes its record from the list of
archives. For an "available" archive, it also removes the archive file,
making it unavailable for download.

:param String archive_id: The archive ID of the archive to be deleted.
2.697338
2.784897
0.968559
response = requests.get(self.endpoints.archive_url(archive_id),
                        headers=self.json_headers(), proxies=self.proxies,
                        timeout=self.timeout)

if response.status_code < 300:
    return Archive(self, response.json())
elif response.status_code == 403:
    raise AuthError()
elif response.status_code == 404:
    raise NotFoundError("Archive not found")
else:
    raise RequestError("An unexpected error occurred", response.status_code)
def get_archive(self, archive_id)
Gets an Archive object for the given archive ID.

:param String archive_id: The archive ID.

:rtype: The Archive object.
2.56777
2.67794
0.95886
params = {}
if offset is not None:
    params['offset'] = offset
if count is not None:
    params['count'] = count
if session_id is not None:
    params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)

response = requests.get(
    endpoint, headers=self.json_headers(), proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code < 300:
    return ArchiveList(self, response.json())
elif response.status_code == 403:
    raise AuthError()
elif response.status_code == 404:
    raise NotFoundError("Archive not found")
else:
    raise RequestError("An unexpected error occurred", response.status_code)
def get_archives(self, offset=None, count=None, session_id=None)
Returns an ArchiveList, which is an array of archives that are completed
and in-progress, for your API key.

:param int: offset Optional. The index offset of the first archive. 0 is
  offset of the most recently started archive. 1 is the offset of the
  archive that started prior to the most recent archive. If you do not
  specify an offset, 0 is used.

:param int: count Optional. The number of archives to be returned. The
  maximum number of archives returned is 1000.

:param string: session_id Optional. Used to list archives for a specific
  session ID.

:rtype: An ArchiveList object, which is an array of Archive objects.
2.231026
2.272151
0.9819
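A pagination sketch over get_archives; the 'items' attribute on ArchiveList is an assumption based on the JSON shape described above:

offset = 0
page_size = 100
while True:
    page = opentok.get_archives(offset=offset, count=page_size)
    for archive in page.items:       # 'items' attribute assumed
        print(archive.id, archive.status)
    if len(page.items) < page_size:  # a short page means we reached the end
        break
    offset += page_size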
return self.get_archives(offset, count, session_id)
def list_archives(self, offset=None, count=None, session_id=None)
Alternative method for getting the archive list; it is equivalent to 'get_archives()'. Both methods exist to maintain backward compatibility.
6.154799
3.963716
1.552785
response = requests.post(
    self.endpoints.signaling_url(session_id, connection_id),
    data=json.dumps(payload),
    headers=self.json_headers(),
    proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code == 204:
    pass
elif response.status_code == 400:
    raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.')
elif response.status_code == 403:
    raise AuthError('You are not authorized to send the signal. Check your authentication credentials.')
elif response.status_code == 404:
    raise SignalingError('The client specified by the connectionId property is not connected to the session.')
elif response.status_code == 413:
    raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).')
else:
    raise RequestError('An unexpected error occurred', response.status_code)
def signal(self, session_id, payload, connection_id=None)
Send signals to all participants in an active OpenTok session or to a specific
client connected to that session.

:param String session_id: The session ID of the OpenTok session that receives the signal.
:param Dictionary payload: Structure that contains both the type and data fields. These
  correspond to the type and data parameters passed in the client signal received handlers.
:param String connection_id: Optional. The connection ID of a client connected to the
  session. If you specify this value, the signal is sent to the specified client.
  Otherwise, the signal is sent to all clients connected to the session.
3.300059
3.18794
1.03517
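A signal sketch showing the payload structure named in the docstring; the type and data values are illustrative:

payload = {
    'type': 'chat',                    # must match the client-side signal handler's type
    'data': 'Hello from the server',
}
opentok.signal(session_id, payload)                               # broadcast to every client
opentok.signal(session_id, payload, connection_id=connection_id)  # or target one client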
endpoint = self.endpoints.get_stream_url(session_id, stream_id)
response = requests.get(
    endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)

if response.status_code == 200:
    return Stream(response.json())
elif response.status_code == 400:
    raise GetStreamError('Invalid request. This response may indicate that the data in your request is invalid JSON, that you did not pass in a session ID, or that you passed in an invalid stream ID.')
elif response.status_code == 403:
    raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
elif response.status_code == 408:
    raise GetStreamError('You passed in an invalid stream ID.')
else:
    raise RequestError('An unexpected error occurred', response.status_code)
def get_stream(self, session_id, stream_id)
Returns a Stream object that contains information about an OpenTok stream:

- id: The stream ID
- videoType: "camera" or "screen"
- name: The stream name (if one was set when the client published the stream)
- layoutClassList: An array of the layout classes for the stream
3.679168
3.761736
0.978051
endpoint = self.endpoints.get_stream_url(session_id)
response = requests.get(
    endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)

if response.status_code == 200:
    return StreamList(response.json())
elif response.status_code == 400:
    raise GetStreamError('Invalid request. This response may indicate that the data in your request is invalid JSON, or that you did not pass in a valid session ID.')
elif response.status_code == 403:
    raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
else:
    raise RequestError('An unexpected error occurred', response.status_code)
def list_streams(self, session_id)
Returns a list of Stream objects that contains information about all the streams
in an OpenTok session, with the following attributes:

- count: An integer that indicates the number of streams in the session
- items: List of the Stream objects
4.305817
4.611124
0.933789
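Iterating the result of list_streams; the count/items attributes follow the docstring, and the per-stream attribute names are assumed from the JSON fields:

streams = opentok.list_streams(session_id)
print('streams in session:', streams.count)
for stream in streams.items:
    print(stream.id, stream.videoType)   # attribute names assumed from the JSON fields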
endpoint = self.endpoints.force_disconnect_url(session_id, connection_id)
response = requests.delete(
    endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)

if response.status_code == 204:
    pass
elif response.status_code == 400:
    raise ForceDisconnectError('One of the arguments - sessionId or connectionId - is invalid.')
elif response.status_code == 403:
    raise AuthError('You are not authorized to forceDisconnect, check your authentication credentials.')
elif response.status_code == 404:
    raise ForceDisconnectError('The client specified by the connectionId property is not connected to the session.')
else:
    raise RequestError('An unexpected error occurred', response.status_code)
def force_disconnect(self, session_id, connection_id)
Sends a request to disconnect a client from an OpenTok session.

:param String session_id: The session ID of the OpenTok session from which the
  client will be disconnected.
:param String connection_id: The connection ID of the client that will be disconnected.
3.203261
3.194299
1.002805
payload = {
    'type': layout_type,
}
if layout_type == 'custom' and stylesheet is not None:
    payload['stylesheet'] = stylesheet

endpoint = self.endpoints.set_archive_layout_url(archive_id)
response = requests.put(
    endpoint,
    data=json.dumps(payload),
    headers=self.json_headers(),
    proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code == 200:
    pass
elif response.status_code == 400:
    raise ArchiveError('Invalid request. This response may indicate that the data in your request is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
    raise AuthError('Authentication error.')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def set_archive_layout(self, archive_id, layout_type, stylesheet=None)
Use this method to change the layout of videos in an OpenTok archive.

:param String archive_id: The ID of the archive that will be updated.
:param String layout_type: The layout type for the archive. Valid values are
  'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'.
:param String stylesheet: Optional. CSS used to style the custom layout. Specify
  this only if you set layout_type to 'custom'.
3.722643
3.843911
0.968452
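A set_archive_layout sketch covering both the custom and predefined cases; the CSS string is illustrative only:

custom_css = 'stream { float: left; } stream.focus { width: 100%; height: 100%; }'
opentok.set_archive_layout(archive_id, 'custom', stylesheet=custom_css)

# Predefined layouts take no stylesheet:
opentok.set_archive_layout(archive_id, 'horizontalPresentation')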
if options is None:
    options = {}

payload = {
    'sessionId': session_id,
    'token': token,
    'sip': {
        'uri': sip_uri
    }
}

if 'from' in options:
    payload['sip']['from'] = options['from']
if 'headers' in options:
    payload['sip']['headers'] = options['headers']
if 'auth' in options:
    payload['sip']['auth'] = options['auth']
if 'secure' in options:
    payload['sip']['secure'] = options['secure']

endpoint = self.endpoints.dial_url()
response = requests.post(
    endpoint,
    data=json.dumps(payload),
    headers=self.json_headers(),
    proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code == 200:
    return SipCall(response.json())
elif response.status_code == 400:
    raise SipDialError('Invalid request. Invalid session ID.')
elif response.status_code == 403:
    raise AuthError('Authentication error.')
elif response.status_code == 404:
    raise SipDialError('The session does not exist.')
elif response.status_code == 409:
    raise SipDialError(
        'You attempted to start a SIP call for a session that '
        'does not use the OpenTok Media Router.')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def dial(self, session_id, token, sip_uri, options=None)
Use this method to connect a SIP platform to an OpenTok session. The audio from
the end of the SIP call is added to the OpenTok session as an audio-only stream.
The OpenTok Media Router mixes audio from other streams in the session and sends
the mixed audio to the SIP endpoint.

:param String session_id: The OpenTok session ID for the SIP call to join.
:param String token: The OpenTok token to be used for the participant being called.
:param String sip_uri: The SIP URI to be used as destination of the SIP call
  initiated from OpenTok to the SIP platform.
:param Dictionary options: Optional. Additional options with the following properties:

  String 'from': The number or string that will be sent to the final SIP number
  as the caller.

  Dictionary 'headers': Defines custom headers to be added to the SIP INVITE
  request initiated from OpenTok to the SIP platform. Each of the custom headers
  must start with the "X-" prefix, or the call will result in a Bad Request (400)
  response.

  Dictionary 'auth': Contains the username and password to be used in the SIP
  INVITE request for HTTP digest authentication, if it is required by the SIP
  platform. For example:

    'auth': {
      'username': 'username',
      'password': 'password'
    }

  Boolean 'secure': A Boolean flag that indicates whether the media must be
  transmitted encrypted (true) or not (false, the default).

:rtype: A SipCall object, which contains data of the SIP call: id, connectionId
  and streamId.
2.331556
2.176618
1.071183
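A dial sketch exercising the optional fields from the docstring; the SIP URI, caller ID, header, and credential values are placeholders:

options = {
    'from': '15551234567',                             # placeholder caller ID
    'headers': {'X-Custom-Header': 'value'},           # custom headers need the X- prefix
    'auth': {'username': 'user', 'password': 'pass'},  # placeholder digest credentials
    'secure': True,                                    # request encrypted media
}
sip_call = opentok.dial(session_id, token, 'sip:user@sip.example.com;transport=tls', options)
print(sip_call.id, sip_call.connectionId, sip_call.streamId)  # fields per the docstring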
items_payload = {'items': payload}
endpoint = self.endpoints.set_stream_class_lists_url(session_id)
response = requests.put(
    endpoint,
    data=json.dumps(items_payload),
    headers=self.json_headers(),
    proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code == 200:
    pass
elif response.status_code == 400:
    raise SetStreamClassError(
        'Invalid request. This response may indicate that the data in your request '
        'is invalid JSON. It may also indicate that you passed in invalid layout options.'
    )
elif response.status_code == 403:
    raise AuthError('Authentication error.')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def set_stream_class_lists(self, session_id, payload)
Use this method to change layout classes for OpenTok streams. The layout classes
define how the streams are displayed in the layout of a composed OpenTok archive.

:param String session_id: The ID of the session of the streams that will be updated.
:param List payload: A list defining the class lists to apply to the streams. Each
  element in the list is a dictionary with two properties: 'id' and 'layoutClassList'.
  The 'id' property is the stream ID (a String), and 'layoutClassList' is an array
  of class names (Strings) to apply to the stream. For example:

    payload = [
        {'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},
        {'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},
        {'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']}
    ]
4.025008
4.190212
0.960574
payload = {
    'sessionId': session_id
}
payload.update(options)

endpoint = self.endpoints.broadcast_url()
response = requests.post(
    endpoint,
    data=json.dumps(payload),
    headers=self.json_headers(),
    proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code == 200:
    return Broadcast(response.json())
elif response.status_code == 400:
    raise BroadcastError(
        'Invalid request. This response may indicate that the data in your request '
        'is invalid JSON, that you passed in invalid layout options, that you have '
        'exceeded the limit of five simultaneous RTMP streams for an OpenTok session, '
        'or that you specified an invalid resolution.')
elif response.status_code == 403:
    raise AuthError('Authentication error.')
elif response.status_code == 409:
    raise BroadcastError('The broadcast has already started for the session.')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def start_broadcast(self, session_id, options)
Use this method to start live streaming for an OpenTok session. This broadcasts
the session to an HLS (HTTP live streaming) or to RTMP streams. To successfully
start broadcasting a session, at least one client must be connected to the
session. You can only start live streaming for sessions that use the OpenTok
Media Router (with the media mode set to routed); you cannot use live streaming
with sessions that have the media mode set to relayed.

:param String session_id: The session ID of the OpenTok session you want to broadcast.
:param Dictionary options: Options with the following properties:

  Dictionary 'layout' optional: Specify this to assign the initial layout type for
  the broadcast. Valid values for the layout property are "bestFit", "custom",
  "horizontalPresentation", "pip" and "verticalPresentation". If you specify a
  "custom" layout type, set the stylesheet property of the layout object to the
  stylesheet. If you do not specify an initial layout type, the broadcast stream
  uses the Best Fit layout type.

  Integer 'maxDuration' optional: The maximum duration for the broadcast, in
  seconds. The broadcast will automatically stop when the maximum duration is
  reached. You can set the maximum duration to a value from 60 (60 seconds) to
  36000 (10 hours). The default maximum duration is 2 hours (7200 seconds).

  Dictionary 'outputs': This object defines the types of broadcast streams you
  want to start (both HLS and RTMP). You can include HLS, RTMP, or both as
  broadcast streams. If you include RTMP streaming, you can specify up to five
  target RTMP streams. For each RTMP stream, specify 'serverUrl' (the RTMP server
  URL), 'streamName' (the stream name, such as the YouTube Live stream name or
  the Facebook stream key), and (optionally) 'id' (a unique ID for the stream).

  String 'resolution' optional: The resolution of the broadcast, either "640x480"
  (SD, the default) or "1280x720" (HD).

:rtype: A Broadcast object, which contains information about the broadcast: id,
  sessionId, projectId, createdAt, updatedAt, resolution, status and broadcastUrls.
3.955947
4.021016
0.983818
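An options sketch for start_broadcast combining HLS output with one RTMP target; the server URL and stream key are placeholders, and the per-broadcast attribute names are assumed from the JSON fields:

options = {
    'layout': {'type': 'bestFit'},
    'maxDuration': 7200,                               # the documented default (2 hours)
    'outputs': {
        'hls': {},                                     # request an HLS stream
        'rtmp': [{
            'id': 'target-1',                          # optional unique ID
            'serverUrl': 'rtmp://rtmp.example.com/live',  # placeholder RTMP server URL
            'streamName': 'STREAM_KEY',                # placeholder stream name/key
        }],
    },
    'resolution': '1280x720',
}
broadcast = opentok.start_broadcast(session_id, options)
print(broadcast.id, broadcast.broadcastUrls)           # attribute names assumed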
endpoint = self.endpoints.broadcast_url(broadcast_id, stop=True)
response = requests.post(
    endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)

if response.status_code == 200:
    return Broadcast(response.json())
elif response.status_code == 400:
    raise BroadcastError(
        'Invalid request. This response may indicate that the data in your request '
        'is invalid JSON.')
elif response.status_code == 403:
    raise AuthError('Authentication error.')
elif response.status_code == 409:
    raise BroadcastError(
        'The broadcast (with the specified ID) was not found or it has already '
        'stopped.')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def stop_broadcast(self, broadcast_id)
Use this method to stop a live broadcast of an OpenTok session.

:param String broadcast_id: The ID of the broadcast you want to stop.

:rtype: A Broadcast object, which contains information about the broadcast: id,
  sessionId, projectId, createdAt, updatedAt and resolution.
3.525816
3.618922
0.974272
endpoint = self.endpoints.broadcast_url(broadcast_id)
response = requests.get(
    endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)

if response.status_code == 200:
    return Broadcast(response.json())
elif response.status_code == 400:
    raise BroadcastError(
        'Invalid request. This response may indicate that the data in your request '
        'is invalid JSON.')
elif response.status_code == 403:
    raise AuthError('Authentication error.')
elif response.status_code == 409:
    raise BroadcastError('No matching broadcast found (with the specified ID).')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def get_broadcast(self, broadcast_id)
Use this method to get details on a broadcast that is in progress.

:param String broadcast_id: The ID of the broadcast you want to retrieve.

:rtype: A Broadcast object, which contains information about the broadcast: id,
  sessionId, projectId, createdAt, updatedAt, resolution, broadcastUrls and status.
3.546689
3.834526
0.924936
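A poll-then-stop sketch pairing get_broadcast with stop_broadcast; the 'status' attribute and its values are assumed from the docstrings:

broadcast = opentok.get_broadcast(broadcast_id)
if broadcast.status == 'started':     # status value assumed
    broadcast = opentok.stop_broadcast(broadcast_id)
print(broadcast.status)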
payload = {
    'type': layout_type,
}
if layout_type == 'custom' and stylesheet is not None:
    payload['stylesheet'] = stylesheet

endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True)
response = requests.put(
    endpoint,
    data=json.dumps(payload),
    headers=self.json_headers(),
    proxies=self.proxies,
    timeout=self.timeout
)

if response.status_code == 200:
    pass
elif response.status_code == 400:
    raise BroadcastError(
        'Invalid request. This response may indicate that the data in your request '
        'is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
    raise AuthError('Authentication error.')
else:
    raise RequestError('OpenTok server error.', response.status_code)
def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None)
Use this method to change the layout type of a live streaming broadcast.

:param String broadcast_id: The ID of the broadcast that will be updated.
:param String layout_type: The layout type for the broadcast. Valid values are
  'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'.
:param String stylesheet: Optional. CSS used to style the custom layout. Specify
  this only if you set layout_type to 'custom'.
3.572406
3.857777
0.926027
response = self.request(
    ACCESS_TOKEN,
    {
        'x_auth_mode': 'client_auth',
        'x_auth_username': username,
        'x_auth_password': password
    },
    returns_json=False
)
token = dict(parse_qsl(response['data'].decode()))
self.token = oauth.Token(
    token['oauth_token'], token['oauth_token_secret'])
self.oauth_client = oauth.Client(self.consumer, self.token)
def login(self, username, password)
Authenticate using the XAuth variant of OAuth.

:param str username: Username or email address for the relevant account.
:param str password: Password for the account.
4.034544
2.893011
1.394583
time.sleep(REQUEST_DELAY_SECS)
full_path = '/'.join([BASE_URL, 'api/%s' % api_version, path])
params = urlencode(params) if params else None
log.debug('URL: %s', full_path)
request_kwargs = {'method': method}
if params:
    request_kwargs['body'] = params
response, content = self.oauth_client.request(
    full_path, **request_kwargs)
log.debug('CONTENT: %s ...', content[:50])
if returns_json:
    try:
        data = json.loads(content)
        if isinstance(data, list) and len(data) == 1:
            # ugly -- API always returns a list even when you expect
            # only one item
            if data[0]['type'] == 'error':
                raise Exception('Instapaper error %d: %s' % (
                    data[0]['error_code'], data[0]['message'])
                )
                # TODO: PyInstapaperException custom class?
    except ValueError:
        # Instapaper API can be unpredictable/inconsistent, e.g.
        # bookmarks/get_text doesn't return JSON
        data = content
else:
    data = content
return {
    'response': response,
    'data': data
}
def request(self, path, params=None, returns_json=True, method='POST', api_version=API_VERSION)
Process a request using the OAuth client's request method.

:param str path: Path fragment to the API endpoint, e.g. "resource/ID".
:param dict params: Parameters to pass to the request.
:param str method: Optional HTTP method, normally POST for Instapaper.
:param str api_version: Optional alternative API version.
:returns: Response headers and body.
:rtype: dict
4.316513
2.91859
1.478972
path = 'bookmarks/list'
params = {'folder_id': folder, 'limit': limit}
if have:
    have_concat = ','.join(str(id_) for id_ in have)
    params['have'] = have_concat
response = self.request(path, params)
items = response['data']
bookmarks = []
for item in items:
    if item.get('type') == 'error':
        raise Exception(item.get('message'))
    elif item.get('type') == 'bookmark':
        bookmarks.append(Bookmark(self, **item))
return bookmarks
def get_bookmarks(self, folder='unread', limit=25, have=None)
Return list of user's bookmarks.

:param str folder: Optional. Possible values are unread (default), starred,
  archive, or a folder_id value.
:param int limit: Optional. A number between 1 and 500, default 25.
:param list have: Optional. A list of IDs to exclude from results.
:returns: List of user's bookmarks.
:rtype: list
2.511766
2.565731
0.978967
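An Instapaper sketch: authenticate via login, then fetch unread bookmarks while excluding already-synced IDs. The class name, import path, credentials, and bookmark attribute names are assumptions/placeholders:

from pyinstapaper.instapaper import Instapaper  # import path assumed

client = Instapaper('OAUTH_KEY', 'OAUTH_SECRET')  # placeholder consumer credentials
client.login('user@example.com', 'password')      # placeholder account credentials

seen_ids = [123456, 789012]                       # placeholder IDs already synced
for bookmark in client.get_bookmarks(folder='unread', limit=100, have=seen_ids):
    print(bookmark.bookmark_id, bookmark.title)   # attribute names assumed from the API fields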
path = 'folders/list'
response = self.request(path)
items = response['data']
folders = []
for item in items:
    if item.get('type') == 'error':
        raise Exception(item.get('message'))
    elif item.get('type') == 'folder':
        folders.append(Folder(self, **item))
return folders
def get_folders(self)
Return list of user's folders.

:rtype: list
2.984089
3.010618
0.991188
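Listing folders with get_folders and reusing a folder_id with get_bookmarks, continuing the sketch above; the folder attribute names are assumed from the API fields:

folders = client.get_folders()
for folder in folders:
    print(folder.folder_id, folder.title)   # attribute names assumed

if folders:
    # A folder_id value can be passed as the 'folder' argument of get_bookmarks:
    in_folder = client.get_bookmarks(folder=str(folders[0].folder_id))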