code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def gauge(self, stat, value, sample_rate=1):
    """Log gauge information for a single stat.

    >>> statsd_client.gauge('some.gauge', 42)
    """
    self.send({stat: "%f|g" % value}, sample_rate)
7.462899
11.376714
0.65598
def update_stats(self, stats, delta, sample_rate=1):
    """Update one or more stat counters by an arbitrary amount.

    >>> statsd_client.update_stats('some.int', 10)
    """
    if not isinstance(stats, list):
        stats = [stats]
    payload = {stat: "%s|c" % delta for stat in stats}
    self.send(payload, sample_rate)
4.886701
4.855382
1.00645
def send(self, data, sample_rate=1):
    """Squirt the metrics over UDP.

    :param data: dict of stat name -> formatted value string.
    :param sample_rate: when < 1, only send this fraction of the time and
        annotate each value with ``|@rate`` so the server can compensate.
    """
    if self.prefix:
        data = dict((".".join((self.prefix, stat)), value)
                    for stat, value in data.items())
    if sample_rate < 1:
        # probabilistic sampling: drop the whole batch most of the time
        if random.random() > sample_rate:
            return
        sampled_data = dict((stat, "%s|@%s" % (value, sample_rate))
                            for stat, value in data.items())
    else:
        sampled_data = data
    try:
        # BUGFIX/idiom: plain loop instead of a side-effect-only list
        # comprehension, and narrow the bare `except:` (which also caught
        # KeyboardInterrupt/SystemExit) to Exception.
        for stat, value in sampled_data.items():
            payload = bytes(bytearray("%s:%s" % (stat, value), "utf-8"))
            self.udp_sock.sendto(payload, self.addr)
    except Exception:
        self.log.exception("unexpected error")
3.436466
3.004729
1.143686
def start(self, *args, **kw):
    """Start the daemon.

    Refuses to start (and exits with status 1) when a pidfile already
    exists, then daemonizes and runs the main loop.
    """
    pid = None
    if os.path.exists(self.pidfile):
        with open(self.pidfile, 'r') as fh:
            pid = int(fh.read().strip())
    if pid:
        msg = 'pidfile (%s) exists. Daemon already running?\n'
        sys.stderr.write(msg % self.pidfile)
        sys.exit(1)
    self.daemonize()
    self.run(*args, **kw)
1.935149
1.799428
1.075425
def stop(self):
    """Stop the daemon.

    Reads the PID from ``self.pidfile`` and sends SIGTERM repeatedly until
    the process disappears, then removes the stale pidfile. Exits with
    status 1 on any other OSError from ``os.kill``.
    """
    pid = None
    if os.path.exists(self.pidfile):
        with open(self.pidfile, 'r') as fp:
            pid = int(fp.read().strip())
    if not pid:
        msg = 'pidfile (%s) does not exist. Daemon not running?\n'
        sys.stderr.write(msg % self.pidfile)
        return
    try:
        while 1:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError as e:
        e = str(e)
        # BUGFIX: the original tested `e.find('No such process') > 0`,
        # which misses a match at index 0; substring membership is the
        # intended check.
        if 'No such process' in e:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print(e)
            sys.exit(1)
1.79214
1.680011
1.066743
def restart(self, *args, **kw):
    """Restart the daemon by stopping it and then starting it again."""
    self.stop()
    self.start(*args, **kw)
3.749464
3.694155
1.014972
def get_auth_providers(self, netloc):
    """BIG-IQ specific query for auth providers.

    BIG-IP doesn't really need this because its multiple auth providers
    handle fallthrough fine; BIG-IQ needs the provider named explicitly
    when a non-default one is used.

    :param netloc: host[:port] of the device
    :return: list of provider dicts from the ``providers`` key
    """
    url = "https://%s/info/system?null" % (netloc)
    response = requests.get(url, verify=self.verify)
    if not response.ok or not hasattr(response, "json"):
        error_message = '%s Unexpected Error: %s for uri: %s\nText: %r' %\
            (response.status_code, response.reason, response.url,
             response.text)
        raise iControlUnexpectedHTTPError(error_message, response=response)
    return response.json()['providers']
4.7082
4.7662
0.987831
def get_new_token(self, netloc):
    """Get a new token from BIG-IP and store it internally.

    Raises a relevant exception if it fails. Called automatically when a
    request is attempted without a token or with an expired one; callers
    rarely need to invoke it directly.
    """
    login_body = {
        'username': self.username,
        'password': self.password,
    }
    if self.auth_provider:
        if self.auth_provider in ('local', 'tmos'):
            login_body['loginProviderName'] = self.auth_provider
        elif self.auth_provider not in ['none', 'default']:
            # resolve a named/linked provider via the BIG-IQ provider list
            for provider in self.get_auth_providers(netloc):
                if (self.auth_provider in provider['link']
                        or self.auth_provider == provider['name']):
                    login_body['loginProviderName'] = provider['name']
                    break
    elif self.login_provider_name == 'tmos':
        login_body['loginProviderName'] = self.login_provider_name

    login_url = "https://%s/mgmt/shared/authn/login" % (netloc)
    response = requests.post(
        login_url,
        json=login_body,
        verify=self.verify,
        auth=HTTPBasicAuth(self.username, self.password)
    )
    self.attempts += 1
    if not response.ok or not hasattr(response, "json"):
        error_message = '%s Unexpected Error: %s for uri: %s\nText: %r' %\
            (response.status_code, response.reason, response.url,
             response.text)
        raise iControlUnexpectedHTTPError(error_message, response=response)
    resp_json = response.json()
    token = self._get_token_from_response(resp_json)
    created_bigip = self._get_last_update_micros(token)
    try:
        expiration_bigip = self._get_expiration_micros(token, created_bigip)
    except (KeyError, ValueError):
        error_message = '%s Unparseable Response: %s for uri: %s\nText: %r' %\
            (response.status_code, response.reason, response.url,
             response.text)
        raise iControlUnexpectedHTTPError(error_message, response=response)
    try:
        self.expiration = self._get_token_expiration_time(
            created_bigip, expiration_bigip)
    except iControlUnexpectedHTTPError:
        error_message = '%s Token already expired: %s for uri: %s\nText: %r' % \
            (response.status_code, time.ctime(expiration_bigip),
             response.url, response.text)
        raise iControlUnexpectedHTTPError(error_message, response=response)
2.352367
2.301468
1.022116
def generate_bigip_uri(base_uri, partition, name, sub_path, suffix, **kwargs):
    '''(str, str, str) --> str

    Validates each supplied element against the spec for its part of the
    URI (via _validate_uri_parts), then assembles the parts into a valid
    BigIP REST URI string.

    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
    'CUSTOMER1', 'nat52', params={'a':1})
    'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52'
    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
    'CUSTOMER1', 'nat52', params={'a':1}, suffix='/wacky')
    'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52/wacky'
    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', '', '', \
    params={'a':1}, suffix='/thwocky')
    'https://0.0.0.0/mgmt/tm/ltm/nat/thwocky'

    ::Warning: '/' and '~' can be valid inside the object name or subPath;
    pass transform_name / transform_subpath as True to escape them
    (default False).
    '''
    _validate_uri_parts(base_uri, partition, name, sub_path, suffix,
                        **kwargs)
    if kwargs.get('transform_name', False) and name != '':
        name = name.replace('/', '~')
    if kwargs.get('transform_subpath', False) and sub_path != '':
        sub_path = sub_path.replace('/', '~')
    if partition != '':
        partition = '~' + partition
    elif sub_path:
        msg = 'When giving the subPath component include partition ' \
              'as well.'
        raise InvalidURIComponentPart(msg)
    if sub_path != '' and partition != '':
        sub_path = '~' + sub_path
    if name != '' and partition != '':
        name = '~' + name
    tilded_partition_and_instance = partition + sub_path + name
    if suffix and not tilded_partition_and_instance:
        suffix = suffix.lstrip('/')
    return base_uri + tilded_partition_and_instance + suffix
3.554523
1.644887
2.160953
def decorate_HTTP_verb_method(method):
    """Prepare and post-process an HTTP VERB method for the REST server.

    Core logic of the module: optionally validates and assembles a URI
    from parts via generate_bigip_uri, then
    1. pre-logs the request details
    2. submits the request
    3. logs the response
    4. raises iControlUnexpectedHTTPError for any status outside 200-206.
    """
    @functools.wraps(method)
    def wrapper(self, RIC_base_uri, **kwargs):
        partition = kwargs.pop('partition', '')
        sub_path = kwargs.pop('subPath', '')
        suffix = kwargs.pop('suffix', '')
        identifier, kwargs = _unique_resource_identifier_from_kwargs(**kwargs)
        uri_as_parts = kwargs.pop('uri_as_parts', False)
        transform_name = kwargs.pop('transform_name', False)
        transform_subpath = kwargs.pop('transform_subpath', False)
        if uri_as_parts:
            REST_uri = generate_bigip_uri(
                RIC_base_uri, partition, identifier, sub_path, suffix,
                transform_name=transform_name,
                transform_subpath=transform_subpath, **kwargs)
        else:
            REST_uri = RIC_base_uri
        logger = logging.getLogger(__name__)
        logger.debug("%s WITH uri: %s AND suffix: %s AND kwargs: %s" %
                     (method.__name__, REST_uri, suffix, kwargs))
        response = method(self, REST_uri, **kwargs)
        logger.debug(
            "RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:"
            " %s\nText: %r" % (response.status_code,
                               response.headers.get('Content-Type', None),
                               response.headers.get('Content-Encoding', None),
                               response.text))
        if response.status_code not in range(200, 207):
            error_message = '%s Unexpected Error: %s for uri: %s\nText: %r' %\
                (response.status_code, response.reason, response.url,
                 response.text)
            raise iControlUnexpectedHTTPError(error_message,
                                              response=response)
        return response
    return wrapper
2.920871
2.737305
1.067061
name = kwargs.pop('name', '') uuid = kwargs.pop('uuid', '') id = kwargs.pop('id', '') if uuid: return uuid, kwargs elif id: # Used for /mgmt/cm/system/authn/providers/tmos on BIG-IP return id, kwargs else: return name, kwargs
def _unique_resource_identifier_from_kwargs(**kwargs)
Chooses an identifier given different choices The unique identifier in BIG-IP's REST API at the time of this writing is called 'name'. This is in contrast to the unique identifier that is used by iWorkflow and BIG-IQ which at some times is 'name' and other times is 'uuid'. For example, in iWorkflow, there consider this URI * https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp Then consider this iWorkflow URI * https://localhost/mgmt/cm/cloud/connectors/local/{0} In the first example, the identifier, {0}, is what we would normally consider a name. For example, "tenant1". In the second example though, the value is expected to be what we would normally consider to be a UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'. This method only tries to rectify the problem of which to use. I believe there might be some change that the two can appear together, although I have not yet experienced it. If it is possible, I believe it would happen in BIG-IQ/iWorkflow land where the UUID and Name both have significance. That's why I deliberately prefer the UUID when it exists in the parameters sent to the URL. :param kwargs: :return:
5.374598
4.740165
1.133842
def delete(self, uri, **kwargs):
    """Send an HTTP DELETE command to the BIG-IP REST server.

    Optional ``name``/``partition`` kwargs are folded into the URI by the
    verb decorator; all remaining kwargs are passed through to
    :meth:`requests.Session.send` / the prepared request.

    :param uri: an HTTP URI
    :type uri: str
    """
    request_args = get_request_args(kwargs)
    send_args = get_send_args(kwargs)
    prepared = self.session.prepare_request(
        requests.Request('DELETE', uri, **request_args))
    if self.debug:
        self._debug_output.append(debug_prepared_request(prepared))
    return self.session.send(prepared, **send_args)
4.059111
4.423669
0.917589
def append_user_agent(self, user_agent):
    """Append *user_agent* to the session's User-Agent header.

    The new fragment is separated from the existing value by a space.

    :param user_agent: string to append
    :type user_agent: str
    """
    existing = self.session.headers.get('User-Agent', '')
    combined = (existing + ' ' + user_agent).strip()
    self.session.headers['User-Agent'] = combined
2.877076
2.636093
1.091417
def validate_public_key(value):
    """Check that *value* is a valid RSA public key in PEM or OpenSSH form.

    Raises ``django.core.exceptions.ValidationError`` (with the last
    loader's error message) when both loaders reject the key.
    """
    last_exc = None
    for loader in (load_pem_public_key, load_ssh_public_key):
        try:
            loader(value.encode('utf-8'), default_backend())
            return  # parsed successfully — key is valid
        except Exception as e:
            last_exc = e
    raise ValidationError('Public key is invalid: %s' % last_exc)
2.746658
2.556129
1.074538
for fmt in cls.OUTPUT_FORMATS: clean_fmt = fmt.replace('+', '_') setattr(cls, clean_fmt, property( (lambda x, fmt=fmt: cls._output(x, fmt)), # fget (lambda x, y, fmt=fmt: cls._input(x, y, fmt))))
def _register_formats(cls)
Adds format properties.
4.04115
3.757275
1.075553
def to_file(self, output_filename):
    '''Handles pdf and epub format.

    Input: output_filename should have the proper extension.
    Output: the name of the file created, or an IOError if failed.
    '''
    temp_file = NamedTemporaryFile(mode="w", suffix=".md", delete=False)
    temp_file.write(self._content)
    temp_file.close()

    subprocess_arguments = [PANDOC_PATH, temp_file.name,
                            '-o %s' % output_filename]
    subprocess_arguments.extend(self.arguments)
    cmd = " ".join(subprocess_arguments)

    # NOTE(review): building a shell string for os.popen is fragile with
    # paths containing spaces; subprocess.run with an argument list would
    # be safer — left as-is to preserve behavior.
    fin = os.popen(cmd)
    msg = fin.read()
    fin.close()
    if msg:
        # BUGFIX: the original had print("Pandoc message: {}",format(msg))
        # — a stray comma instead of '.', which printed the raw template
        # and format(msg) as two values instead of the formatted string.
        print("Pandoc message: {}".format(msg))

    os.remove(temp_file.name)
    if exists(output_filename):
        return output_filename
    raise IOError("Failed creating file: %s" % output_filename)
4.31865
2.787893
1.549073
def sign(username, private_key, generate_nonce=None, iat=None, algorithm=DEFAULT_ALGORITHM):
    """Create a signed JWT using the given username and RSA private key.

    :param username: Username (string) to authenticate as on the remote
        system.
    :param private_key: Private key used to sign the JWT claim.
    :param generate_nonce: Optional callable producing a new nonce;
        defaults to ``random.random``.
    :param iat: Optional timestamp to include in the claim; defaults to
        ``time.time()``.
    :param algorithm: Optional signing algorithm (default ``RS512``).
    :return: JWT claim as a string.
    """
    # BUGFIX: compare against None rather than truthiness — iat=0 (the
    # epoch) is a legitimate caller-supplied timestamp.
    if iat is None:
        iat = time.time()
    if not generate_nonce:
        generate_nonce = lambda username, iat: random.random()  # NOQA
    token_data = {
        'username': username,
        'time': iat,
        'nonce': generate_nonce(username, iat),
    }
    return jwt.encode(token_data, private_key, algorithm=algorithm)
2.600947
2.835175
0.917385
def get_claimed_username(token):
    """Return the username a JWT claims to be, WITHOUT verifying the
    signature.

    :param token: JWT claim
    :return: claimed username, or None when the claim carries none
    """
    unverified = jwt.decode(token, options={
        'verify_signature': False
    })
    if 'username' in unverified:
        return unverified['username']
    return None
2.950633
3.485615
0.846517
def verify(token, public_key, validate_nonce=None, algorithms=None):
    """Verify the validity of a JWT using *public_key*.

    :param token: JWT claim
    :param public_key: Public key used to verify the claim's signature.
    :param validate_nonce: Callable used to validate the claim's nonce.
    :param algorithms: Allowable signing algorithms; defaults to
        ``[DEFAULT_ALGORITHM]``.
    :return: False when invalid, otherwise the dict of token data.
    """
    # BUGFIX: the default was a mutable list literal in the signature;
    # use the None-sentinel idiom instead (same effective default).
    if algorithms is None:
        algorithms = [DEFAULT_ALGORITHM]
    try:
        token_data = jwt.decode(token, public_key, algorithms=algorithms)
    except jwt.InvalidTokenError:
        logger.debug('JWT failed verification')
        return False

    claimed_username = token_data.get('username')
    claimed_time = token_data.get('time', 0)
    claimed_nonce = token_data.get('nonce')

    # Ensure time is within acceptable bounds
    current_time = time.time()
    min_time = current_time - TIMESTAMP_TOLERANCE
    max_time = current_time + TIMESTAMP_TOLERANCE
    if claimed_time < min_time or claimed_time > max_time:
        logger.debug('Claimed time is outside of allowable tolerances')
        return False

    # Ensure nonce is unique
    if validate_nonce:
        if not validate_nonce(claimed_username, claimed_time, claimed_nonce):
            logger.debug('Claimed nonce failed to validate')
            return False
    else:
        logger.warning('validate_nonce function was not supplied!')

    # If we've gotten this far, the token is valid
    return token_data
2.414718
2.404042
1.004441
def log_used_nonce(self, username, iat, nonce):
    """Log a nonce as used, and therefore henceforth invalid.

    :param username: Username as a string.
    :param iat: Unix timestamp (float or int) of when the nonce was used.
    :param nonce: Nonce value.
    """
    # TODO: Figure out some way to do this in a thread-safe manner. It'd
    # be better to use a Redis Set or something, but we don't necessarily
    # want to be tightly coupled to Redis either since not everyone uses
    # it.
    key = self.create_nonce_key(username, iat)
    # BUGFIX: the value is stored as a set, but the original read it back
    # and called list.append() on it — AttributeError on the second nonce
    # for the same key. Normalize to a set on read and add to it.
    used = set(cache.get(key, ()))
    used.add(nonce)
    cache.set(key, used, token.TIMESTAMP_TOLERANCE * 2)
6.827542
7.595991
0.898835
def validate_nonce(self, username, iat, nonce):
    """Confirm that the given nonce hasn't already been used.

    :param username: Username as a string.
    :param iat: Unix timestamp (float or int) of when the nonce was used.
    :param nonce: Nonce value.
    :return: True when the nonce is still unused, False otherwise.
    """
    used = cache.get(self.create_nonce_key(username, iat), [])
    return nonce not in used
6.679399
7.004883
0.953535
def process_request(self, request):
    """Process a Django request and authenticate users via JWT.

    When a valid JWT authentication header is found, the matched user is
    set as ``request.user`` and CSRF protection is disabled
    (``request._dont_enforce_csrf_checks = True``).

    :param request: Django Request instance
    """
    if 'HTTP_AUTHORIZATION' not in request.META:
        return
    try:
        method, claim = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
    except ValueError:
        return
    if method.upper() != AUTH_METHOD:
        return

    # resolve the (unverified) claimed user first
    username = token.get_claimed_username(claim)
    if not username:
        return
    User = get_user_model()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return

    # try every public key registered for that user until one verifies
    claim_data = None
    for public in user.public_keys.all():
        claim_data = token.verify(claim, public.key,
                                  validate_nonce=self.validate_nonce)
        if claim_data:
            break
    if not claim_data:
        return

    logger.debug('Successfully authenticated %s using JWT', user.username)
    request._dont_enforce_csrf_checks = True
    request.user = user
2.887525
2.836605
1.017951
def generate_key_pair(size=2048, public_exponent=65537, as_string=True):
    """Generate an RSA public/private key pair.

    :param size: Key length in bits; 2048 or 4096 are reasonable defaults.
    :param public_exponent: RSA public exponent; 65537 should almost
        always be used.
    :param as_string: When True return PEM strings, otherwise return the
        RSAPrivateKey/RSAPublicKey objects.
    :return: (private, public) as strings or key objects.
    """
    private = rsa.generate_private_key(
        public_exponent=public_exponent,
        key_size=size,
        backend=default_backend()
    )
    public = private.public_key()
    if not as_string:
        return private, public
    pem_private = private.private_bytes(
        Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()).decode(ENCODING)
    pem_public = public.public_bytes(
        Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(ENCODING)
    return pem_private, pem_public
1.616136
1.785394
0.905198
def load_private_key(key_file, key_password=None):
    """Load a private key from disk.

    :param key_file: File path to the key file (``~`` is expanded).
    :param key_password: Optional password used to decrypt an encrypted
        key file. Defaults to None (key read back verbatim).
    :return: private key contents as a string
    """
    path = os.path.abspath(os.path.expanduser(key_file))
    if not key_password:
        with open(path, 'r') as fh:
            return fh.read()
    with open(path, 'rb') as fh:
        return decrypt_key(fh.read(), key_password).decode(ENCODING)
2.218958
2.570938
0.863093
def decrypt_key(key, password):
    """Decrypt an encrypted private key.

    :param key: Encrypted PEM private key bytes.
    :param password: Key pass-phrase.
    :return: Decrypted private key as unencrypted PKCS8 PEM bytes.
    """
    private = serialization.load_pem_private_key(
        key, password=password, backend=default_backend())
    return private.private_bytes(
        Encoding.PEM, PrivateFormat.PKCS8, NoEncryption())
2.237845
2.786617
0.803069
def create_auth_header(username, key=None, key_file="~/.ssh/id_rsa", key_password=None):
    """Create an HTTP Authorization header value from a private key.

    Either *key* or *key_file* must be provided.

    :param username: Username to authenticate as on the remote system.
    :param key: Optional private key (string or RSAPrivateKey).
    :param key_file: Optional path to the user's PEM private key file;
        defaults to ~/.ssh/id_rsa.
    :param key_password: Optional password (bytes) to decrypt key_file.
    :return: Authorization header value as a string.
    """
    if not key:
        key = load_private_key(key_file, key_password)
    claim = token.sign(username, key)
    return "%s %s" % (AUTH_METHOD, claim.decode(ENCODING))
4.754592
5.753986
0.826313
# print('_config_log', 'from %s' %name if name else '') FORMAT = '%(message)s' # maybe better for log files # FORMAT='[%(levelname)s]:%(message)s', # Reset handlers for h in list(logging.root.handlers): logging.root.removeHandler(h) global _log_level if lvl: _log_level = lvl logging.basicConfig(level=_log_level, format=FORMAT, stream=sys.stdout) _log = logging.getLogger(__name__) _log.setLevel(_log_level) # external for log in ['urllib3', 'asyncio']: logging.getLogger(log).setLevel(_log_level)
def _config_logs(lvl=None, name=None)
Set up or change logging configuration. _config_logs() => idempotent setup; _config_logs(L) => change log level
4.124968
4.496734
0.917325
def mk_set_headers(self, data, columns):
    """Figure out column sizes for *data* and create the header format."""
    columns = tuple(columns)
    widths = []
    for key in columns:
        widest_value = max(len(str(row.get(key, ''))) for row in data)
        # a column is never narrower than its header text
        widths.append(max(widest_value, len(self._get_name(key))))
    return self.mk_fmt(*widths)
5.453835
4.84368
1.125969
if key in self.display_names: return self.display_names[key] return key.capitalize()
def _get_name(self, key)
get display name for a key, or mangle for display
4.597477
3.109519
1.478517
def display_set(self, typ, data, columns):
    """Display a list of dicts as a titled, column-aligned table."""
    self.display_section("%s (%d)" % (self._get_name(typ), len(data)))
    headers = tuple(map(self._get_name, columns))
    fmt = self.mk_set_headers(data, columns)
    self.display_headers(fmt, headers)
    for each in data:
        # NOTE(review): row values come from each row's own keys, not
        # from *columns* — a row whose key set differs from *columns*
        # will not line up with the header; confirm this is intended.
        row = tuple(self._get_val(each, k) for k in each)
        self._print(fmt % row)
    self._print("\n")
4.28463
4.342372
0.986703
string = u" ".join(args) + '\n' self.fobj.write(string)
def _print(self, *args)
internal print to self.fobj
7.321458
4.336535
1.68832
def display(self, typ, data):
    """Display a section of *typ* with *data*.

    Dispatches to a ``print_<typ>`` method when one exists; otherwise
    renders mappings recursively, lists of dicts as tables, other lists
    element-wise, and scalars as ``typ: data``.
    """
    # BUGFIX: the ABCs were removed from the collections namespace in
    # Python 3.10; import from collections.abc with a legacy fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover - very old interpreters
        from collections import Mapping

    if hasattr(self, 'print_' + typ):
        getattr(self, 'print_' + typ)(data)
    elif not data:
        self._print("%s: %s" % (typ, data))
    elif isinstance(data, Mapping):
        self._print("\n", typ)
        for k, v in data.items():
            self.print(k, v)
    elif isinstance(data, (list, tuple)):
        # tabular data layout for lists of dicts
        if isinstance(data[0], Mapping):
            self.display_set(typ, data, self._get_columns(data[0]))
        else:
            for each in data:
                self.print(typ, each)
    else:
        self._print("%s: %s" % (typ, data))
    self.fobj.flush()
3.043042
3.048657
0.998158
"Decorate a command handler" def _wrapped(*a, **k): r = func(*a, **k) if r is None: r = 0 return r return staticmethod(_wrapped)
def _handler(func)
Decorate a command handler
6.40087
5.888624
1.086989
def add_subcommands(parser, commands):
    """Register (name, class) command pairs as subparsers on *parser*.

    Each class may provide ``add_arguments(subparser)`` and ``handle``;
    the handler is stored via ``set_defaults(handler=...)``.
    """
    subparsers = parser.add_subparsers()
    for cmd, cls in commands:
        sub = subparsers.add_parser(cmd, help=cls.__doc__)
        add_args = getattr(cls, 'add_arguments', None)
        if add_args:
            add_args(sub)
        handler = getattr(cls, 'handle', None)
        if handler:
            sub.set_defaults(handler=handler)
2.245683
2.190664
1.025115
def reftag_to_cls(fn):
    """Decorator that inspects *fn*'s parameters for ``concrete`` and
    ``resource`` and, when a string (reftag) is passed for either,
    replaces it with the class the backend maps that reftag to.
    """
    # getargspec() was removed in Python 3.11; prefer getfullargspec when
    # available (identical first element: the positional arg names).
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    names = getspec(fn)[0]

    @wraps(fn)
    def wrapped(*args, **kwargs):
        # BUGFIX: *args arrives as a tuple, which does not support item
        # assignment — the original raised TypeError whenever a reftag
        # string was actually passed.
        args = list(args)
        backend = args[0]
        # NOTE(review): names[1:] is paired with args[0:], i.e. names are
        # shifted by one relative to the positional args. This preserves
        # the original pairing, but looks like a possible off-by-one —
        # confirm against the decorated call sites.
        i = 0
        for name in names[1:]:
            value = args[i]
            if name == "concrete" and isinstance(value, six.string_types):
                args[i] = backend.REFTAG_CONCRETE[value]
            elif name == "resource" and isinstance(value, six.string_types):
                args[i] = backend.REFTAG_RESOURCE[value]
            i += 1
        return fn(*args, **kwargs)
    return wrapped
2.772295
2.323461
1.193175
def update(self, res, pk, depth=1, since=None):
    """Sync one object to the local database.

    On failure caused by a missing referenced object, _update attempts
    to fetch the referenced object from the REST API.
    """
    def _fetch():
        return self._fetcher.fetch_latest(res, pk, 1, since=since)
    self._update(res, _fetch, depth)
7.007685
8.065427
0.868855
def update_where(self, res, depth=0, since=None, **kwargs):
    """Like update() but selects objects with WHERE-style keyword args."""
    def _fetch():
        return self._fetcher.fetch_all_latest(res, 0, kwargs, since=since)
    self._update(res, _fetch, depth)
13.890917
8.442216
1.645411
def update_all(self, rs=None, since=None):
    """Sync all objects for the resources *rs* (all resources when None)."""
    # BUGFIX: resolve the default *before* logging — the original joined
    # `r.tag for r in rs` first and raised TypeError when rs was None.
    if rs is None:
        rs = resource.all_resources()
    self._log.info("Updating resources: %s", ' '.join(r.tag for r in rs))
    ctx = self._ContextClass(self)
    for r in rs:
        # bind r as a default to avoid late-binding surprises in case
        # _atomic_update defers execution of the callable
        self._atomic_update(lambda r=r: ctx.sync_resource(r, since=since))
8.831351
4.93659
1.788958
def get_task(self, key):
    """Return the scheduled task for a (resource, pk) *key*, or None."""
    res, pk = key
    jobs, lock = self._jobs
    with lock:
        return jobs[res].get(pk)
17.75886
18.743645
0.94746
def set_job(self, key, func, args):
    """Return the scheduled task for *key*, creating one if none exists.

    Returns:
        the task coroutine/continuation.
    """
    res, pk = key
    jobs, lock = self._jobs
    task = _tasks.UpdateTask(func(*args), key)
    with lock:
        job = jobs[res].get(pk)
        had = bool(job)
        if not job:
            job = task
            jobs[res][pk] = job
        else:
            # a task already exists for this key; discard the new one
            task.cancel()
    self._log.debug('Scheduling: %s-%s (%s)', res.tag, pk,
                    'new task' if not had else 'dup')
    return job
7.329083
7.464737
0.981827
def pending_tasks(self, res):
    """Return a lock-protected snapshot copy of the tasks for *res*."""
    jobs, lock = self._jobs
    with lock:
        return jobs[res].copy()
18.864149
10.65611
1.770266
def fetch_and_index(self, fetch_func):
    """Fetch rows with *fetch_func* and yield a dict indexed by row ID.

    NOTE(review): this is a generator (single yield), not a plain
    dict-returning function — callers must iterate it.
    """
    data, err = fetch_func()
    if err:
        raise err
    yield {row['id']: row for row in data}
11.19716
6.817725
1.64236
def clean_helper(B, obj, clean_func):
    """Clean *obj*, collecting uniqueness and missing-relation errors.

    Returns:
        tuple: (<dict of non-unique fields>, <dict of missing refs>),
        or (None, None) when validation passes.
    """
    try:
        clean_func(obj)
    except B.validation_error() as exc:
        # Check whether it's a uniqueness or missing-relation error
        fields = B.detect_uniqueness_error(exc)
        missing = B.detect_missing_relations(obj, exc)
        return fields, missing
    return (None, None)
7.319979
6.098858
1.200221
def initialize_object(B, res, row):
    """Do a shallow initialization of an object from a row dict.

    Arguments:
        row (dict): depth=1-style data, i.e. many_refs may be bare ids.

    Returns:
        tuple: (obj, fetched, dangling) where fetched maps resource ->
        {pk: subrow} for embedded sub-rows and dangling maps resource ->
        set of ids that still need fetching.
    """
    B = get_backend()
    concrete = B.get_concrete(res)  # hoisted: was recomputed repeatedly
    field_groups = FieldGroups(concrete)
    try:
        obj = B.get_object(concrete, row['id'])
    except B.object_missing_error(concrete):
        obj = concrete()

    # Set scalar attributes
    for fname in field_groups['scalars']:
        value = row.get(fname, getattr(obj, fname, None))
        value = B.convert_field(obj.__class__, fname, value)
        setattr(obj, fname, value)

    # Already-fetched sub-rows, and id-only (dangling) refs
    fetched, dangling = defaultdict(dict), defaultdict(set)

    def _handle_subrow(R, subrow):
        # sub-rows may be deep (dict) or shallow (bare id)
        if isinstance(subrow, dict):
            pk = subrow['id']
            fetched[R][pk] = subrow
        else:
            pk = subrow
            dangling[R].add(pk)
        return pk

    for fname, field in field_groups['one_refs'].items():
        fieldres = _field_resource(B, concrete, fname)
        key = field.column
        subrow = row.get(key)
        if subrow is None:
            # e.g. use "org" if "org_id" is missing
            key = fname
            subrow = row[key]
        setattr(obj, key, _handle_subrow(fieldres, subrow))

    for fname in field_groups['many_refs']:
        fieldres = _field_resource(B, concrete, fname)
        # BUGFIX/idiom: plain loop replaces a side-effect-only list
        # comprehension whose result (`pks`) was never used
        for subrow in row.get(fname, []):
            _handle_subrow(fieldres, subrow)

    return obj, fetched, dangling
4.083528
4.054594
1.007136
def read_config(conf_dir=DEFAULT_CONFIG_DIR):
    """Find and read the config file for a directory; None if not found."""
    conf_path = os.path.expanduser(conf_dir)
    if not os.path.exists(conf_path):
        # only raise when the caller explicitly asked for a non-default dir
        if conf_dir != DEFAULT_CONFIG_DIR:
            raise IOError("Config directory not found at %s" % (conf_path, ))
    return munge.load_datafile('config', conf_path, default=None)
5.34659
4.05902
1.317212
def load_config(conf_dir=DEFAULT_CONFIG_DIR, schema=CLIENT_SCHEMA):
    """Load config from *conf_dir*, using schema defaults for missing values.

    The directory should contain config.<ext> in a supported format.
    """
    data = default_config(schema)
    overrides = read_config(conf_dir)
    if overrides:
        recursive_update(data, overrides)
    return data
5.025551
5.565589
0.902968
def detect_old(data):
    """Return True when *data* validates cleanly against the old schema."""
    if not data:
        return False
    ok, errors, warnings = _schema.validate(_OLD_SCHEMA, data)
    return ok and not (errors or warnings)
9.370766
6.225241
1.505286
def convert_old(data):
    """Convert config data from the old schema to the new schema."""
    ret = default_config()
    ret['sync'].update(data.get('peeringdb', {}))
    ret['orm']['database'].update(data.get('database', {}))
    return ret
8.972816
7.260478
1.235844
def write_config(data, conf_dir=DEFAULT_CONFIG_DIR, codec="yaml", backup_existing=False):
    """Write config values to a file.

    Arguments:
        - conf_dir<str>: path to output directory
        - codec<str>: output file format (munge codec name)
        - backup_existing<bool>: if a config file exists, rename it to *.bak
          before overwriting
    """
    if not codec:
        codec = 'yaml'
    codec = munge.get_codec(codec)()
    conf_dir = os.path.expanduser(conf_dir)
    if not os.path.exists(conf_dir):
        os.mkdir(conf_dir)
    # Check for existing file, back up if necessary
    outpath = os.path.join(conf_dir, 'config.' + codec.extensions[0])
    if backup_existing and os.path.exists(outpath):
        os.rename(outpath, outpath + '.bak')
    # Fix: close the output file deterministically (it was previously opened
    # inline and left for the garbage collector to close).
    with open(outpath, 'w') as fobj:
        codec.dump(data, fobj)
def write_config(data, conf_dir=DEFAULT_CONFIG_DIR, codec="yaml", backup_existing=False)
Write config values to a file. Arguments: - conf_dir<str>: path to output directory - codec<str>: output field format - backup_existing<bool>: if a config file exists, make a copy before overwriting
2.462282
2.803713
0.878222
def prompt_config(sch, defaults=None, path=None):
    """Recursively prompt for config values described by schema *sch*.

    Arguments:
        - defaults<dict>: default values used for empty inputs
        - path<str>: dotted prefix for config keys (e.g. "path.keyname")
    """
    if defaults is None:
        defaults = {}
    collected = {}
    for name, attr in sch.attributes():
        fullpath = '{}.{}'.format(path, name) if path else name
        fallback = defaults.get(name)
        if isinstance(attr, _schema.Schema):
            # Sub-schema: recurse with the matching defaults subtree.
            value = prompt_config(attr, defaults=fallback, path=fullpath)
        else:
            if fallback is None:
                fallback = attr.default
            if fallback is None:
                fallback = ''
            value = prompt(fullpath, fallback)
        collected[name] = value
    return sch.validate(collected)
def prompt_config(sch, defaults=None, path=None)
Utility function to recursively prompt for config values Arguments: - defaults<dict>: default values used for empty inputs - path<str>: path to prepend to config keys (eg. "path.keyname")
2.773068
3.060331
0.906133
def fetch(self, R, pk, depth=1):
    """Request a single object from the API; re-raise any fetch error."""
    data, err = self._fetcher.fetch(R, pk, depth)
    if err:
        raise err
    return data
def fetch(self, R, pk, depth=1)
Request object from API
10.41486
6.912074
1.506763
def fetch_all(self, R, depth=1, **kwargs):
    """Request multiple objects from the API; re-raise any fetch error.

    Note: keyword arguments are forwarded as a single dict positional.
    """
    data, err = self._fetcher.fetch_all(R, depth, kwargs)
    if err:
        raise err
    return data
def fetch_all(self, R, depth=1, **kwargs)
Request multiple objects from API
11.058311
8.023828
1.378184
def get(self, res, pk):
    """Get a resource instance by primary key (id)."""
    backend = get_backend()
    concrete = backend.get_concrete(res)
    return backend.get_object(concrete, pk)
def get(self, res, pk)
Get a resource instance by primary key (id)
10.75935
8.311219
1.294557
def all(self, res):
    """Return all instances of a resource (no filter is applied)."""
    backend = get_backend()
    concrete = backend.get_concrete(res)
    return backend.get_objects(concrete)
def all(self, res)
Get resources using a filter condition
24.627663
14.606148
1.686116
def run_task(func):
    """Decorator: drive the generator produced by *func* and return the
    collected results (a list when there are multiple)."""
    def _wrapped(*args, **kwargs):
        return _consume_task(func(*args, **kwargs))
    return _wrapped
def run_task(func)
Decorator to collect and return generator results, returning a list if there are multiple results
6.044031
5.266199
1.147703
def get(self, typ, id, **kwargs):
    """Load a single object of *typ* by *id*; extra kwargs become query params."""
    response = self._request(typ, id=id, params=kwargs)
    return self._load(response)
def get(self, typ, id, **kwargs)
Load type by id
7.766023
6.699406
1.159211
def _request(self, typ, id=0, method='GET', params=None, data=None, url=None):
    """Send the HTTP request and return the response object."""
    backend, backend_version = peeringdb.get_backend_info()
    user_agent = 'PeeringDB/{} {}/{}'.format(peeringdb.__version__, backend, backend_version)
    headers = {
        "Accept": "application/json",
        "User-Agent": user_agent,
    }
    # Only authenticate when credentials are configured.
    auth = (self.user, self.password) if self.user else None
    if not url:
        if id:
            url = "%s/%s/%s" % (self.url, typ, id)
        else:
            url = "%s/%s" % (self.url, typ)
    return requests.request(method, url, params=params, data=data, auth=auth, headers=headers)
def _request(self, typ, id=0, method='GET', params=None, data=None, url=None)
send the request, return response obj
2.35716
2.354776
1.001012
def wrap_generator(func):
    """Decorator converting a generator function into an async function.

    Drives the generator, awaiting any awaitable yielded items and sending
    each result back in.  Returns the collected results — a bare value when
    there is exactly one, otherwise a list.
    """
    async def _wrapped(*args, **kwargs):
        sent, results = None, []
        gen = func(*args, **kwargs)
        while True:
            try:
                item = gen.send(sent)
            except StopIteration:
                break
            sent = await item if inspect.isawaitable(item) else item
            results.append(sent)
        return results[0] if len(results) == 1 else results
    return _wrapped
def wrap_generator(func)
Decorator to convert a generator function to an async function which collects and returns generator results, returning a list if there are multiple results
2.633903
2.530183
1.040993
def run_task(func):
    """Decorator wrapping an async function in an event loop.

    Use for main sync interface methods.

    Fix: uses asyncio.run(), which creates and tears down a fresh event loop
    per call, instead of the deprecated asyncio.get_event_loop() (which
    raises/warns on modern Python when no loop is running).
    """
    def _wrapped(*args, **kwargs):
        return asyncio.run(func(*args, **kwargs))
    return _wrapped
def run_task(func)
Decorator to wrap an async function in an event loop. Use for main sync interface methods.
2.909317
2.481614
1.172349
def split_ref(string):
    """Split a string like "net-123" or "NET 123" into (tag, id).

    :raises ValueError: when the string is not <letters><space-or-dash?><digits>.
    :return: (lowercased tag, integer id) tuple
    """
    # Fix: raw string literal — '\s' and '\d' in a plain string are invalid
    # escape sequences (DeprecationWarning, SyntaxWarning on newer Pythons).
    re_tag = re.compile(r'^(?P<tag>[a-zA-Z]+)[\s-]*(?P<pk>\d+)$')
    m = re_tag.search(string)
    if not m:
        raise ValueError("unable to split string '%s'" % (string, ))
    return (m.group('tag').lower(), int(m.group('pk')))
def split_ref(string)
splits a string into (tag, id)
3.226119
2.883723
1.118734
def prompt(msg, default=None):
    """Prompt for a line of input; return *default* on empty input.

    Exits the process on Ctrl-C; EOF is treated as empty input.
    """
    if default is not None:
        msg = '{} ({})'.format(msg, repr(default))
    msg = '{}: '.format(msg)
    try:
        answer = input(msg)
    except KeyboardInterrupt:
        exit(1)
    except EOFError:
        answer = ''
    return answer if answer else default
def prompt(msg, default=None)
Prompt for input
2.879755
2.905655
0.991086
def limit_mem(limit=(4 * 1024**3)):
    """Set the soft RLIMIT_DATA memory limit (default 4 GiB); keep the hard limit."""
    rsrc = resource.RLIMIT_DATA
    old_soft, hard = resource.getrlimit(rsrc)
    resource.setrlimit(rsrc, (limit, hard))
    new_soft, _ = resource.getrlimit(rsrc)
    assert new_soft == limit
    logging.getLogger(__name__).debug('Set soft memory limit: %s => %s', old_soft, new_soft)
def limit_mem(limit=(4 * 1024**3))
Set soft memory limit
3.602653
3.508302
1.026894
def render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name):
    """Render PyTorch module source from collected code fragments.

    When *dst_dir* is given, also writes <module>.py (the source) and
    <module>.pt (the state dict via torch.save) into it.

    :return: rendered source string
    """
    inits = [i for i in inits if len(i) > 0]  # drop empty init fragments
    output = pytorch_model_template.format(**{
        'module_name': pytorch_module_name,
        'module_name_lower': pytorch_module_name.lower(),
        'inits': '\n'.join(inits),
        'inputs': inputs,
        'calls': '\n'.join(calls),
        'outputs': outputs,
    })
    if dst_dir is not None:
        import os
        import errno
        try:
            os.makedirs(dst_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        # Fix: removed the redundant f.close() that appeared inside the
        # with-block (the context manager already closes the file).
        with open(os.path.join(dst_dir, pytorch_module_name.lower() + '.py'), 'w+') as f:
            f.write(output)
        torch.save(pytorch_dict, os.path.join(dst_dir, pytorch_module_name.lower() + '.pt'))
    return output
def render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name)
Render model.
1.849444
1.837532
1.006483
def gluon2pytorch(net, args, dst_dir, pytorch_module_name, debug=True, keep_names=False):
    """Convert a Gluon model to PyTorch source and an evaluated model.

    :param net: Gluon network to convert.
    :param args: iterable of input shapes used to build dummy inputs.
    :param dst_dir: directory to write generated source/weights to (or None).
    :param pytorch_module_name: class name for the generated module.
    :param debug: print trace information while converting.
    :param keep_names: keep original node names instead of x<i> placeholders.
    :raises AttributeError: when a layer type has no registered converter.
    """
    # Initialize params with a dummy forward pass.
    x = [mx.nd.array(np.ones(i)) for i in args]
    x = net(*x)
    params = net.collect_params()
    # Create symbols to trace the net.
    x = [mx.sym.var('__input__' + str(i)) for i in range(len(args))]
    sym = net(*x)
    if len(sym) > 1:
        group = mx.sym.Group(sym)
    else:
        group = sym
    # JSON definition of the traced model.
    json_model = json.loads(group.tojson())['nodes']
    nodes = []
    is_skipped = []
    pytorch_dict = {}
    inits = []
    calls = []
    inputs = []
    outputs = [i[0] for i in json.loads(group.tojson())['heads']]
    names_dict = {} if keep_names else None
    for i, node in enumerate(json_model):
        if keep_names:
            names_dict[i] = node['name']
        # 'null' ops are parameters/inputs, not real operations.
        # TODO: convert constants
        if node['op'] == 'null':
            if node['name'].find('__input__') == 0:
                inputs.append(int(node['name'][9:]))
            is_skipped.append(1)
            continue
        is_skipped.append(0)
        op = {
            'name': node['name'][:-4],
            'type': node['op'],
        }
        # Fix: these prints were unconditional; now gated on the debug flag.
        if debug:
            print(op, node)
        if len(node['inputs']) > 0:
            original_inputs = [j for j in np.array(node['inputs'])[:, 0] if j in inputs]
            op['inputs'] = [j for j in np.array(node['inputs'])[:, 0]
                            if is_skipped[j] != 1 or j in original_inputs]
        else:
            if debug:
                print(json_model)
            op['inputs'] = []
        # Not all nodes have 'attrs'.
        op['attrs'] = node.get('attrs', {})
        if debug:
            print(op)
            print('__')
        nodes.append(op)
        if op['type'] in CONVERTERS:
            init_str, call_str = CONVERTERS[op['type']](i, op, nodes, params, pytorch_dict, names_dict, debug)
            inits.append(init_str)
            calls.append(call_str)
        else:
            raise AttributeError('Layer isn\'t supported')
    if names_dict is not None:
        inputs = ', '.join([names_dict[i] for i in inputs])
        outputs = ', '.join([names_dict[i] for i in outputs])
    else:
        inputs = ', '.join(['x' + str(i) for i in inputs])
        outputs = ', '.join(['x' + str(i) for i in outputs])
    pytorch_source = render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name)
    return eval_model(pytorch_source, pytorch_dict, pytorch_module_name)
def gluon2pytorch(net, args, dst_dir, pytorch_module_name, debug=True, keep_names=False)
Function to convert a model.
3.395504
3.387408
1.00239
def hook_odoo(package):
    """Work around Odoo 10 issue (setuptools-odoo #10); Python 2 only.

    If the 'odoo' package lacks a 'release' attribute, odoo/__init__.py did
    not run — it is a dummy namespace package created by *-nspkg.pth files.
    Drop it from sys.modules so a later `import odoo` runs the real init.
    """
    if sys.version_info.major != 2:
        return
    if package.__name__ != 'odoo':
        return
    if hasattr(package, 'release'):
        return
    sys.modules.pop('odoo.addons', None)
    sys.modules.pop('odoo', None)
def hook_odoo(package)
work around Odoo 10 issue https://github.com/acsone/setuptools-odoo/issues/10 # This hook should run after all *-nspkg.pth files because it is named # zzz_ and .pth files run in alphabetical order.
5.471046
4.324039
1.265263
def getAllFtpConnections(self):
    """Return the dict of active ftp connections, optionally logging a summary."""
    lines = ["Current ftp connections:"]
    for index, key in enumerate(self.ftpList, start=1):
        lines.append("%d. %s %s" % (index, key, self.ftpList[key]))
    if self.printOutput:
        logger.info("\n".join(lines) + "\n")
    return self.ftpList
def getAllFtpConnections(self)
Returns a dictionary containing active ftp connections.
3.352835
3.029798
1.10662
def ftp_connect(self, host, user='anonymous', password='anonymous@', port=21, timeout=30, connId='default'):
    """Construct an FTP object, open a connection and log in.

    Call this before any other keyword.  Returns server output.

    Parameters:
    - host - server host address
    - user(optional) - FTP user name, 'anonymous' by default
    - password(optional) - FTP password, 'anonymous@' by default
    - port(optional) - TCP port, 21 by default
    - timeout(optional) - timeout in seconds, 30 by default
    - connId(optional) - connection identifier, 'default' by default
    """
    if connId in self.ftpList:
        errMsg = "Connection with ID %s already exist. It should be deleted before this step." % connId
        raise FtpLibraryError(errMsg)
    newFtp = None
    outputMsg = ""
    try:
        timeout = int(timeout)
        port = int(port)
        newFtp = ftplib.FTP()
        outputMsg += newFtp.connect(host, port, timeout)
        outputMsg += newFtp.login(user, password)
    except socket.error as se:
        # Fix: 'occured' typo, and no longer discard the error detail.
        raise FtpLibraryError('Socket error exception occurred: %s' % se)
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    except Exception as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(outputMsg)
    self.__addNewConnection(newFtp, connId)
def ftp_connect(self, host, user='anonymous', password='anonymous@', port=21, timeout=30, connId='default')
Constructs FTP object, opens a connection and login. Call this function before any other (otherwise raises exception). Returns server output. Parameters: - host - server host address - user(optional) - FTP user name. If not given, 'anonymous' is used. - password(optional) - FTP password. If not given, 'anonymous@' is used. - port(optional) - TCP port. By default 21. - timeout(optional) - timeout in seconds. By default 30. - connId(optional) - connection identifier. By default equals 'default' Examples: | ftp connect | 192.168.1.10 | mylogin | mypassword | | | | ftp connect | 192.168.1.10 | | | | | | ftp connect | 192.168.1.10 | mylogin | mypassword | connId=secondConn | | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | 20 | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | | | ftp connect | 192.168.1.10 | mylogin | mypassword | timeout=20 | | | ftp connect | 192.168.1.10 | port=29 | timeout=20 | | |
3.267852
3.333034
0.980444
def get_welcome(self, connId='default'):
    """Return the welcome message of the FTP server for connection *connId*."""
    conn = self.__getConnection(connId)
    message = ""
    try:
        message += conn.getwelcome()
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(message)
    return message
def get_welcome(self, connId='default')
Returns welcome message of FTP server. Parameters: - connId(optional) - connection identifier. By default equals 'default'
5.037722
4.632189
1.087547
dirList = [] thisConn = self.__getConnection(connId) outputMsg = "" try: thisConn.dir(dirList.append) for d in dirList: outputMsg += str(d) + "\n" except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return dirList
def dir(self, connId='default')
Returns list of raw lines returned as contents of the current directory. Parameters: - connId(optional) - connection identifier. By default equals 'default'
3.981489
4.207532
0.946277
files_list = [] thisConn = self.__getConnection(connId) try: files_list = thisConn.nlst() except: files_list = [] return files_list
def dir_names(self, connId='default')
Returns list of files (and/or directories) of current directory. Parameters: - connId(optional) - connection identifier. By default equals 'default'
4.497691
4.380577
1.026735
thisConn = self.__getConnection(connId) outputMsg = "" localPath = "" if localFilePath == None: localPath = remoteFileName else: localPath = os.path.normpath(localFilePath) if os.path.isdir(localPath): localPath = os.path.join(localPath, remoteFileName) try: with open(localPath, 'wb') as localFile: outputMsg += thisConn.retrbinary("RETR " + remoteFileName, localFile.write) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
def download_file(self, remoteFileName, localFilePath=None, connId='default')
Downloads file from current directory on FTP server in binary mode. If localFilePath is not given, file is saved in current local directory (by default folder containing robot framework project file) with the same name as source file. Returns server output Parameters: - remoteFileName - file name on FTP server - localFilePath (optional) - local file name or path where remote file should be saved. - connId(optional) - connection identifier. By default equals 'default' localFilePath variable can have following meanings: 1. file name (will be saved in current default directory); 2. full path (dir + file name) 3. dir path (original file name will be added) Examples: | download file | a.txt | | | | download file | a.txt | b.txt | connId=ftp1 | | download file | a.txt | D:/rfftppy/tmp | | | download file | a.txt | D:/rfftppy/tmp/b.txt | | | download file | a.txt | D:\\rfftppy\\tmp\\c.txt | |
2.614836
2.780032
0.940578
thisConn = self.__getConnection(connId) outputMsg = "" remoteFileName_ = "" localFilePath = os.path.normpath(localFileName) if not os.path.isfile(localFilePath): raise FtpLibraryError("Valid file path should be provided.") else: if remoteFileName==None: fileTuple = os.path.split(localFileName) if len(fileTuple)==2: remoteFileName_ = fileTuple[1] else: remoteFileName_ = 'defaultFileName' else: remoteFileName_ = remoteFileName try: outputMsg += thisConn.storbinary("STOR " + remoteFileName_, open(localFilePath, "rb")) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
def upload_file(self, localFileName, remoteFileName=None, connId='default')
Sends file from local drive to current directory on FTP server in binary mode. Returns server output. Parameters: - localFileName - file name or path to a file on a local drive. - remoteFileName (optional) - a name or path containing name under which file should be saved. - connId(optional) - connection identifier. By default equals 'default' If remoteFileName argument is not given, local name will be used. Examples: | upload file | x.txt | connId=ftp1 | | upload file | D:/rfftppy/y.txt | | | upload file | u.txt | uu.txt | | upload file | D:/rfftppy/z.txt | zz.txt | | upload file | D:\\rfftppy\\v.txt | |
3.038583
3.269925
0.929252
def size(self, fileToCheck, connId='default'):
    """Return the size of a remote file (as a string of bytes).

    Note: SIZE is not standardized, but is supported by many servers.
    """
    conn = self.__getConnection(connId)
    try:
        result = str(conn.size(fileToCheck))
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(result)
    return result
def size(self, fileToCheck, connId='default')
Checks size of a file on FTP server. Returns size of a file in bytes (integer). Parameters: - fileToCheck - file name or path to a file on FTP server - connId(optional) - connection identifier. By default equals 'default' Example: | ${file1size} = | size | /home/myname/tmp/uu.txt | connId=ftp1 | | Should Be Equal As Numbers | ${file1size} | 31 | | Note that the SIZE command is not standardized, but is supported by many common server implementations.
4.21722
4.52387
0.932215
thisConn = self.__getConnection(connId) outputMsg = "" try: outputMsg += str(thisConn.sendcmd(command)) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
def send_cmd(self, command, connId='default')
Sends any command to FTP server. Returns server output. Parameters: - command - any valid command to be sent (invalid will result in exception). - connId(optional) - connection identifier. By default equals 'default' Example: | send cmd | HELP |
4.801071
4.547279
1.055812
thisConn = self.__getConnection(connId) try: thisConn.quit() self.__removeConnection(connId) except Exception as e: try: thisConn.close() self.__removeConnection(connId) except ftplib.all_errors as x: raise FtpLibraryError(str(x))
def ftp_close(self, connId='default')
Closes FTP connection. Returns None. Parameters: - connId(optional) - connection identifier. By default equals 'default'
3.231974
3.410187
0.947741
def setup_logging(verbose=0, colors=False, name=None):
    """Configure console logging: INFO and below to stdout, the rest to stderr.

    :param int verbose: > 0 enables debug-level logging.
    :param bool colors: print colored text in non-verbose mode.
    :param str name: logger name to attach handlers to (used for testing).
    """
    root = logging.getLogger(name)
    root.setLevel(logging.DEBUG if verbose > 0 else logging.INFO)
    formatter = ColorFormatter(verbose > 0, colors)
    if colors:
        colorclass.Windows.enable()
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    stdout_handler.setLevel(logging.DEBUG)
    # Keep WARNING and above off stdout; the stderr handler takes those.
    stdout_handler.addFilter(type('', (logging.Filter,), {'filter': staticmethod(lambda r: r.levelno <= logging.INFO)}))
    root.addHandler(stdout_handler)
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(formatter)
    stderr_handler.setLevel(logging.WARNING)
    root.addHandler(stderr_handler)
def setup_logging(verbose=0, colors=False, name=None)
Configure console logging. Info and below go to stdout, others go to stderr. :param int verbose: Verbosity level. > 0 print debug statements. > 1 passed to sphinx-build. :param bool colors: Print color text in non-verbose mode. :param str name: Which logger name to set handlers to. Used for testing.
2.170155
2.334397
0.929643
def format(self, record):
    """Prefix sphinxcontrib.versioning records with '=> ' and colorize by level.

    Only records from the SPECIAL_SCOPE logger get the arrow/colors, and only
    in non-verbose mode.
    """
    text = super(ColorFormatter, self).format(record)
    if self.verbose or not record.name.startswith(self.SPECIAL_SCOPE):
        return text
    text = '=> ' + text
    if not self.colors:
        return text
    if record.levelno >= logging.ERROR:
        return str(colorclass.Color.red(text))
    if record.levelno >= logging.WARNING:
        return str(colorclass.Color.yellow(text))
    return str(colorclass.Color.cyan(text))
def format(self, record)
Apply little arrow and colors to the record. Arrow and colors are only applied to sphinxcontrib.versioning log statements. :param logging.LogRecord record: The log record object to log.
3.328383
3.350986
0.993255
def chunk(iterator, max_size):
    """Yield successive lists of up to *max_size* items from *iterator*.

    The final chunk may be smaller.

    :param iter iterator: iterable to chunk.
    :param int max_size: maximum size of each chunk.
    """
    source = iter(iterator)
    while True:
        batch = []
        for count, element in enumerate(source):
            batch.append(element)
            if count >= max_size - 1:
                break
        if not batch:
            return
        yield batch
def chunk(iterator, max_size)
Chunk a list/set/etc. :param iter iterator: The iterable object to chunk. :param int max_size: Max size of each chunk. Remainder chunk may be smaller. :return: Yield list of items. :rtype: iter
2.40764
3.293489
0.73103
log = logging.getLogger(__name__) # Setup env. env = os.environ.copy() if environ: env.update(environ) if env_var and not IS_WINDOWS: env['GIT_DIR'] = os.path.join(local_root, '.git') else: env.pop('GIT_DIR', None) # Run command. with open(os.devnull) as null: main = Popen(command, cwd=local_root, env=env, stdout=PIPE, stderr=PIPE if pipeto else STDOUT, stdin=null) if pipeto: pipeto(main.stdout) main_output = main.communicate()[1].decode('utf-8') # Might deadlock if stderr is written to a lot. else: main_output = main.communicate()[0].decode('utf-8') log.debug(json.dumps(dict(cwd=local_root, command=command, code=main.poll(), output=main_output))) # Verify success. if main.poll() != 0: if retry < 1: raise CalledProcessError(main.poll(), command, output=main_output) time.sleep(0.1) return run_command(local_root, command, env_var, pipeto, retry - 1) return main_output
def run_command(local_root, command, env_var=True, pipeto=None, retry=0, environ=None)
Run a command and return the output. :raise CalledProcessError: Command exits non-zero. :param str local_root: Local path to git root directory. :param iter command: Command to run. :param dict environ: Environment variables to set/override in the command. :param bool env_var: Define GIT_DIR environment variable (on non-Windows). :param function pipeto: Pipe `command`'s stdout to this function (only parameter given). :param int retry: Retry this many times on CalledProcessError after 0.1 seconds. :return: Command output. :rtype: str
2.549651
2.374843
1.073608
def get_root(directory):
    """Return the root directory of the local git repo containing *directory*.

    :raise GitError: if the git command fails (directory not in a repo?).
    """
    argv = ['git', 'rev-parse', '--show-toplevel']
    try:
        output = run_command(directory, argv, env_var=False)
    except CalledProcessError as exc:
        raise GitError('Failed to find local git repository root in {}.'.format(repr(directory)), exc.output)
    if IS_WINDOWS:
        # git prints forward slashes even on Windows; normalize.
        output = output.replace('/', '\\')
    return output.strip()
def get_root(directory)
Get root directory of the local git repo from any subdirectory within it. :raise GitError: If git command fails (dir not a git repo?). :param str directory: Subdirectory in the local repo. :return: Root directory of repository. :rtype: str
3.851
4.022085
0.957464
def list_remote(local_root):
    """Get remote branch/tag latest SHAs via `git ls-remote`.

    :raise GitError: when git ls-remote fails.
    :param str local_root: local path to git root directory.
    :return: list of [sha, name, kind] lists (kind is 'heads' or 'tags').
    :rtype: list
    """
    command = ['git', 'ls-remote', '--heads', '--tags']
    try:
        output = run_command(local_root, command)
    except CalledProcessError as exc:
        raise GitError('Git failed to list remote refs.', exc.output)
    # Dereference annotated tags if any. No need to fetch annotations.
    if '^{}' in output:
        parsed = list()
        for group in (m.groupdict() for m in RE_REMOTE.finditer(output)):
            # A 'name^{}' entry carries the commit SHA the annotated tag
            # points at; fold that SHA into the preceding tag entry.
            dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind']
            if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']:
                parsed[-1]['sha'] = group['sha']
            else:
                parsed.append(group)
    else:
        parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)]
    return [[i['sha'], i['name'], i['kind']] for i in parsed]
def list_remote(local_root)
Get remote branch/tag latest SHAs. :raise GitError: When git ls-remote fails. :param str local_root: Local path to git root directory. :return: List of tuples containing strings. Each tuple is sha, name, kind. :rtype: list
3.435597
3.408264
1.008019
dates_paths = dict() # Filter without docs. for commit in commits: if commit in dates_paths: continue command = ['git', 'ls-tree', '--name-only', '-r', commit] + conf_rel_paths try: output = run_command(local_root, command) except CalledProcessError as exc: raise GitError('Git ls-tree failed on {0}'.format(commit), exc.output) if output: dates_paths[commit] = [None, output.splitlines()[0].strip()] # Get timestamps by groups of 50. command_prefix = ['git', 'show', '--no-patch', '--pretty=format:%ct'] for commits_group in chunk(dates_paths, 50): command = command_prefix + commits_group output = run_command(local_root, command) timestamps = [int(i) for i in RE_UNIX_TIME.findall(output)] for i, commit in enumerate(commits_group): dates_paths[commit][0] = timestamps[i] # Done. return dates_paths
def filter_and_date(local_root, conf_rel_paths, commits)
Get commit Unix timestamps and first matching conf.py path. Exclude commits with no conf.py file. :raise CalledProcessError: Unhandled git command failure. :raise GitError: A commit SHA has not been fetched. :param str local_root: Local path to git root directory. :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py). :param iter commits: List of commit SHAs. :return: Commit time (seconds since Unix epoch) for each commit and conf.py path. SHA keys and [int, str] values. :rtype: dict
3.280718
3.302346
0.993451
def fetch_commits(local_root, remotes):
    """Fetch from origin, pulling in any remote refs not yet known locally.

    :raise CalledProcessError: unhandled git command failure.
    :param str local_root: local path to git root directory.
    :param iter remotes: output of list_remote() — (sha, name, kind) items.
    """
    base = ['git', 'fetch', 'origin']
    # Fetch all known branches first.
    run_command(local_root, base)
    for sha, name, kind in remotes:
        try:
            # Does this SHA already exist locally?
            run_command(local_root, ['git', 'reflog', sha])
            continue
        except CalledProcessError:
            pass
        # Unknown SHA: fetch the specific ref, then verify it arrived.
        run_command(local_root, base + ['refs/{0}/{1}'.format(kind, name)])
        run_command(local_root, ['git', 'reflog', sha])
def fetch_commits(local_root, remotes)
Fetch from origin. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param iter remotes: Output of list_remote().
3.357169
3.666943
0.915523
def export(local_root, commit, target):
    """Export a git commit to a directory via `git archive`.

    "Extracts" all files at the commit to the target directory, then sets the
    mtime of extracted .rst files to their last commit date.

    :raise CalledProcessError: unhandled git command failure.
    :param str local_root: local path to git root directory.
    :param str commit: git commit SHA to export.
    :param str target: directory to export to.
    """
    log = logging.getLogger(__name__)
    target = os.path.realpath(target)
    mtimes = list()

    # Define extract function.
    def extract(stdout):
        # Links are extracted last so their targets already exist.
        queued_links = list()
        try:
            with tarfile.open(fileobj=stdout, mode='r|') as tar:
                for info in tar:
                    log.debug('name: %s; mode: %d; size: %s; type: %s', info.name, info.mode, info.size, info.type)
                    path = os.path.realpath(os.path.join(target, info.name))
                    if not path.startswith(target):
                        # Handle bad paths (path traversal out of target).
                        log.warning('Ignoring tar object path %s outside of target directory.', info.name)
                    elif info.isdir():
                        # Handle directories.
                        if not os.path.exists(path):
                            os.makedirs(path, mode=info.mode)
                    elif info.issym() or info.islnk():
                        # Queue links.
                        queued_links.append(info)
                    else:
                        # Handle files; remember .rst files for mtime fixup.
                        tar.extract(member=info, path=target)
                        if os.path.splitext(info.name)[1].lower() == '.rst':
                            mtimes.append(info.name)
                for info in (i for i in queued_links if os.path.exists(os.path.join(target, i.linkname))):
                    tar.extract(member=info, path=target)
        except tarfile.TarError as exc:
            log.debug('Failed to extract output from "git archive" command: %s', str(exc))

    # Run command, streaming the archive straight into extract().
    run_command(local_root, ['git', 'archive', '--format=tar', commit], pipeto=extract)
    # Set mtime of each .rst file to its last commit timestamp.
    for file_path in mtimes:
        last_committed = int(run_command(local_root, ['git', 'log', '-n1', '--format=%at', commit, '--', file_path]))
        os.utime(os.path.join(target, file_path), (last_committed, last_committed))
def export(local_root, commit, target)
Export git commit to directory. "Extracts" all files at the commit to the target directory. Set mtime of RST files to last commit date. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param str commit: Git commit SHA to export. :param str target: Directory to export to.
2.831085
2.738592
1.033774
def clone(local_root, new_root, remote, branch, rel_dest, exclude):
    """Clone the repo's remote into a new directory and check out *branch*.

    Optionally runs "git rm" on *rel_dest* while restoring *exclude* paths.

    :raise CalledProcessError: unhandled git command failure.
    :raise GitError: handled git failures (no remotes, bad branch, etc.).
    :param str local_root: local path to git root directory.
    :param str new_root: empty local directory to clone into.
    :param str remote: git remote to clone from.
    :param str branch: branch to check out.
    :param str rel_dest: run "git rm" on this directory if exclude is truthy.
    :param iter exclude: relative file paths/globs to keep from "git rm".
    """
    log = logging.getLogger(__name__)
    output = run_command(local_root, ['git', 'remote', '-v'])
    remotes = dict()
    # remotes[name] = [fetch_url, push_url]
    for match in RE_ALL_REMOTES.findall(output):
        remotes.setdefault(match[0], [None, None])
        if match[2] == 'fetch':
            remotes[match[0]][0] = match[1]
        else:
            remotes[match[0]][1] = match[1]
    if not remotes:
        raise GitError('Git repo has no remotes.', output)
    if remote not in remotes:
        raise GitError('Git repo missing remote "{}".'.format(remote), output)
    # Clone.
    try:
        run_command(new_root, ['git', 'clone', remotes[remote][0], '--depth=1', '--branch', branch, '.'])
    except CalledProcessError as exc:
        raise GitError('Failed to clone from remote repo URL.', exc.output)
    # Make sure user didn't select a tag as their DEST_BRANCH.
    try:
        run_command(new_root, ['git', 'symbolic-ref', 'HEAD'])
    except CalledProcessError as exc:
        raise GitError('Specified branch is not a real branch.', exc.output)
    # Copy all remotes from original repo.
    for name, (fetch, push) in remotes.items():
        try:
            run_command(new_root, ['git', 'remote', 'set-url' if name == 'origin' else 'add', name, fetch], retry=3)
            run_command(new_root, ['git', 'remote', 'set-url', '--push', name, push], retry=3)
        except CalledProcessError as exc:
            raise GitError('Failed to set git remote URL.', exc.output)
    # Done if no exclude.
    if not exclude:
        return
    # Resolve exclude paths (globs relative to rel_dest inside the clone).
    exclude_joined = [
        os.path.relpath(p, new_root) for e in exclude for p in glob.glob(os.path.join(new_root, rel_dest, e))
    ]
    log.debug('Expanded %s to %s', repr(exclude), repr(exclude_joined))
    # Do "git rm".
    try:
        run_command(new_root, ['git', 'rm', '-rf', rel_dest])
    except CalledProcessError as exc:
        raise GitError('"git rm" failed to remove ' + rel_dest, exc.output)
    # Restore files in exclude.
    run_command(new_root, ['git', 'reset', 'HEAD'] + exclude_joined)
    run_command(new_root, ['git', 'checkout', '--'] + exclude_joined)
def clone(local_root, new_root, remote, branch, rel_dest, exclude)
Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm". :raise CalledProcessError: Unhandled git command failure. :raise GitError: Handled git failures. :param str local_root: Local path to git root directory. :param str new_root: Local path empty directory in which branch will be cloned into. :param str remote: The git remote to clone from to. :param str branch: Checkout this branch. :param str rel_dest: Run "git rm" on this directory if exclude is truthy. :param iter exclude: List of strings representing relative file paths to exclude from "git rm".
2.557867
2.525754
1.012714
def commit_and_push(local_root, remote, versions):
    """Commit all changes in the repo and push the branch to *remote*.

    :raise CalledProcessError: unhandled git command failure.
    :raise GitError: commit/push failure (bad git config, etc.).
    :param str local_root: local path to git root directory.
    :param str remote: git remote to push to.
    :param versions: Versions class instance (uses .remotes for the message).
    :return: True if pushed (or nothing to do), False if the remote moved and
        the caller must retry.
    :rtype: bool
    """
    log = logging.getLogger(__name__)
    current_branch = run_command(local_root, ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    run_command(local_root, ['git', 'add', '.'])
    # Check if there are no changes.
    try:
        run_command(local_root, ['git', 'diff', 'HEAD', '--no-ext-diff', '--quiet', '--exit-code'])
    except CalledProcessError:
        pass  # Repo is dirty, something has changed.
    else:
        log.info('No changes to commit.')
        return True
    # Check if there are changes excluding those files that always change.
    output = run_command(local_root, ['git', 'diff', 'HEAD', '--no-ext-diff', '--name-status'])
    for status, name in (l.split('\t', 1) for l in output.splitlines()):
        if status != 'M':
            break  # Only looking for modified files.
        components = name.split('/')
        if '.doctrees' not in components and components[-1] != 'searchindex.js':
            break  # Something other than those two dirs/files has changed.
    else:
        log.info('No significant changes to commit.')
        return True
    # Commit.
    latest_commit = sorted(versions.remotes, key=lambda v: v['date'])[-1]
    commit_message_file = os.path.join(local_root, '_scv_commit_message.txt')
    with open(commit_message_file, 'w') as handle:
        handle.write('AUTO sphinxcontrib-versioning {} {}\n\n'.format(
            datetime.utcfromtimestamp(latest_commit['date']).strftime('%Y%m%d'),
            latest_commit['sha'][:11],
        ))
        # Record whitelisted CI environment variables in the message body.
        for line in ('{}: {}\n'.format(v, os.environ[v]) for v in WHITELIST_ENV_VARS if v in os.environ):
            handle.write(line)
    try:
        run_command(local_root, ['git', 'commit', '-F', commit_message_file])
    except CalledProcessError as exc:
        raise GitError('Failed to commit locally.', exc.output)
    os.remove(commit_message_file)
    # Push.
    try:
        run_command(local_root, ['git', 'push', remote, current_branch])
    except CalledProcessError as exc:
        if '[rejected]' in exc.output and '(fetch first)' in exc.output:
            # Another client pushed since we cloned; signal caller to retry.
            log.debug('Remote has changed since cloning the repo. Must retry.')
            return False
        raise GitError('Failed to push to remote.', exc.output)
    log.info('Successfully pushed to remote repository.')
    return True
def commit_and_push(local_root, remote, versions)
Commit changed, new, and deleted files in the repo and attempt to push the branch to the remote repository. :raise CalledProcessError: Unhandled git command failure. :raise GitError: Conflicting changes made in remote by other client and bad git config for commits. :param str local_root: Local path to git root directory. :param str remote: The git remote to push to. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :return: If push succeeded. :rtype: bool
3.036968
2.920043
1.040042
def semvers(names):
    """Parse version names into integer comparison keys.

    Each returned list starts with a validity flag: 0 for names that parsed
    as versions, 1 for invalid ones (so non-versions sort after versions).
    Version integers are negated so ascending sorts put latest first; trailing
    meta characters are encoded with ord() and zero-padded to equal length.

    :param iter names: version/tag/branch name strings.
    :return: list of parsed keys, e.g. v1.10.0b3 -> [0, -1, -10, 0, 98, 51].
    :rtype: list
    """
    matches = [(RE_SEMVER.findall(n) or [[]])[0] for n in names]
    ints_len = 0
    str_len = 0
    # Determine padding widths across all parsed names.
    for match in matches:
        if match:
            ints_len = len(match)  # constant for every match
            str_len = max(str_len, len(match[-1]))
    if not ints_len:
        return matches  # nothing parsed; all entries are empty
    invalid = [1] + [0] * (ints_len + str_len - 1)
    exploded = list()
    for match in matches:
        if not match:
            exploded.append(invalid[:])
            continue
        version_part = [-int(piece or 0) for piece in match[:-1]]
        meta_part = [ord(ch) for ch in match[-1]]
        meta_part += [0] * (str_len - len(match[-1]))
        exploded.append([0] + version_part + meta_part)
    return exploded
def semvers(names)
Parse versions into integers and convert non-integer meta indicators into integers with ord(). Each return list item has an indicator as the first item. 0 for valid versions and 1 for invalid. Can be used to sort non-version names (e.g. master, feature_branch, etc) after valid versions. No sorting is done in this function though. Read multi_sort() docstring for reasoning behind inverted integers in version_ints variable. :param iter names: List of strings representing versions/tags/branches. :return: List of parsed versions. E.g. v1.10.0b3 -> [0, 1, 10, 0, ord('b'), ord('3')] :rtype: list
3.559825
3.480126
1.022901
def multi_sort(remotes, sort):
    """Sort `remotes` in place. Allows sorting by multiple conditions.

    This is needed because Python 3 no longer supports sorting lists of multiple types. Sort keys must all be of the
    same type.

    Problem: the user expects versions to be sorted latest first and timelogical to be most recent first (when viewing
    the HTML documentation), yet expects alphabetical sorting to be A before Z. Solution: invert integers (dates and
    parsed versions).

    :param iter remotes: List of dicts from Versions().remotes.
    :param iter sort: What to sort by. May be one or more of: alpha, time, semver
    """
    alpha_keys = list()
    semver_keys = list()

    # Pre-compute alphabetical keys (names as zero-padded ord() lists) when requested.
    if 'alpha' in sort:
        names = [remote['name'] for remote in remotes]
        width = max(len(name) for name in names)
        for name in names:
            alpha_keys.append([ord(char) for char in name] + [0] * (width - len(name)))

    # Pre-compute semver keys when requested.
    if 'semver' in sort:
        semver_keys = semvers(remote['name'] for remote in remotes)

    # Assemble one composite key per remote, indexed by object identity.
    keys_by_id = dict()
    for index, remote in enumerate(remotes):
        composite = list()
        for condition in sort:
            if condition == 'alpha':
                composite.extend(alpha_keys[index])
            elif condition == 'time':
                composite.append(-remote['date'])  # Inverted: most recent first.
            elif condition == 'semver':
                composite.extend(semver_keys[index])
        keys_by_id[id(remote)] = composite

    # Sort in place using the pre-computed keys.
    remotes.sort(key=lambda item: keys_by_id.get(id(item)))
3.019439
2.765108
1.091979
def branches(self):
    """Return list of (name and urls) only branches.

    :return: List of (name, url) tuples, one per branch remote.
    :rtype: list
    """
    results = list()
    for remote in self.remotes:
        if remote['kind'] != 'heads':
            continue  # Skip non-branch remotes (tags).
        results.append((remote['name'], self.vpathto(remote['name'])))
    return results
12.804674
8.927918
1.434228
def tags(self):
    """Return list of (name and urls) only tags.

    :return: List of (name, url) tuples, one per tag remote.
    :rtype: list
    """
    results = list()
    for remote in self.remotes:
        if remote['kind'] != 'tags':
            continue  # Skip non-tag remotes (branches).
        results.append((remote['name'], self.vpathto(remote['name'])))
    return results
12.06612
9.195893
1.312121
def vhasdoc(self, other_version):
    """Return True if the other version has the current document. Like Sphinx's hasdoc().

    :raise KeyError: If other_version doesn't exist.

    :param str other_version: Version to link to.

    :return: If current document is in the other version.
    :rtype: bool
    """
    # The current version trivially contains the current document.
    if other_version == self.context['current_version']:
        return True
    current_page = self.context['pagename']
    return current_page in self[other_version]['found_docs']
8.036587
8.017557
1.002373
def vpathto(self, other_version):
    """Return relative path to current document in another version. Like Sphinx's pathto().

    If the current document doesn't exist in the other version its master_doc path is returned instead.

    :raise KeyError: If other_version doesn't exist.

    :param str other_version: Version to link to.

    :return: Relative path.
    :rtype: str
    """
    # Fix: replace the obscure inline __import__('posixpath') with a normal import.
    # posixpath (not os.path) because URLs always use forward slashes.
    import posixpath

    is_root = self.context['scv_is_root']
    pagename = self.context['pagename']

    # Linking to the current version from a non-root build: just the bare file name.
    if self.context['current_version'] == other_version and not is_root:
        return '{}.html'.format(pagename.split('/')[-1])

    other_remote = self[other_version]
    other_root_dir = other_remote['root_dir']

    # Climb out of the current page's subdirectories, then out of this version's root dir (unless at web root).
    components = ['..'] * pagename.count('/')
    components += [other_root_dir] if is_root else ['..', other_root_dir]
    # Fall back to the other version's master_doc when it lacks the current document.
    components += [pagename if self.vhasdoc(other_version) else other_remote['master_doc']]
    return '{}.html'.format(posixpath.join(*components))
5.336094
4.91787
1.085042
def read_local_conf(local_conf):
    """Search for conf.py in any rel_source directory in CWD and if found read it and return.

    :param str local_conf: Path to conf.py to read.

    :return: Loaded conf.py.
    :rtype: dict
    """
    log = logging.getLogger(__name__)

    # Attempt to read.
    log.info('Reading config from %s...', local_conf)
    try:
        config = read_config(os.path.dirname(local_conf), '<local>')
    except HandledError:
        log.warning('Unable to read file, continuing with only CLI args.')
        return dict()

    # Keep only public scv_-prefixed settings, with the prefix stripped off.
    prefix = 'scv_'
    selected = dict()
    for full_key, value in config.items():
        if not full_key.startswith(prefix):
            continue
        short_key = full_key[len(prefix):]
        if short_key.startswith('_'):
            continue  # Private/internal settings are not user-configurable.
        selected[short_key] = value
    return selected
4.948019
5.250463
0.942397
def gather_git_info(root, conf_rel_paths, whitelist_branches, whitelist_tags):
    """Gather info about the remote git repository. Get list of refs.

    :raise HandledError: If function fails with a handled error. Will be logged before raising.

    :param str root: Root directory of repository.
    :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py).
    :param iter whitelist_branches: Optional list of patterns to filter branches by.
    :param iter whitelist_tags: Optional list of patterns to filter tags by.

    :return: Commits with docs. A list of tuples: (sha, name, kind, date, conf_rel_path).
    :rtype: list
    """
    log = logging.getLogger(__name__)

    # List remote.
    log.info('Getting list of all remote branches/tags...')
    try:
        remotes = list_remote(root)
    except GitError as exc:
        log.error(exc.message)
        log.error(exc.output)
        raise HandledError
    log.info('Found: %s', ' '.join(i[1] for i in remotes))

    # Filter and date. First attempt uses local objects; a GitError means commits are missing locally,
    # so fetch from the remote and retry once before giving up.
    try:
        try:
            dates_paths = filter_and_date(root, conf_rel_paths, (i[0] for i in remotes))
        except GitError:
            log.info('Need to fetch from remote...')
            fetch_commits(root, remotes)
            try:
                dates_paths = filter_and_date(root, conf_rel_paths, (i[0] for i in remotes))
            except GitError as exc:
                log.error(exc.message)
                log.error(exc.output)
                raise HandledError
    except subprocess.CalledProcessError as exc:
        # Unexpected git failure: dump full command context at debug level, then abort.
        log.debug(json.dumps(dict(command=exc.cmd, cwd=root, code=exc.returncode, output=exc.output)))
        log.error('Failed to get dates for all remote commits.')
        raise HandledError

    # Keep only remotes whose commits contain docs; append their (date, conf_rel_path) info.
    filtered_remotes = [[i[0], i[1], i[2], ] + dates_paths[i[0]] for i in remotes if i[0] in dates_paths]
    log.info('With docs: %s', ' '.join(i[1] for i in filtered_remotes))
    if not whitelist_branches and not whitelist_tags:
        return filtered_remotes

    # Apply whitelist. A remote passes if any pattern matches its name; branches and tags
    # are checked only against their respective whitelists.
    whitelisted_remotes = list()
    for remote in filtered_remotes:
        if remote[2] == 'heads' and whitelist_branches:
            if not any(re.search(p, remote[1]) for p in whitelist_branches):
                continue
        if remote[2] == 'tags' and whitelist_tags:
            if not any(re.search(p, remote[1]) for p in whitelist_tags):
                continue
        whitelisted_remotes.append(remote)
    log.info('Passed whitelisting: %s', ' '.join(i[1] for i in whitelisted_remotes))
    return whitelisted_remotes
2.387206
2.29793
1.038851
def pre_build(local_root, versions):
    """Build docs for all versions to determine root directory and master_doc names.

    Need to build docs to (a) avoid filename collision with files from root_ref and branch/tag names and (b) determine
    master_doc config values for all versions (in case master_doc changes from e.g. contents.rst to index.rst between
    versions).

    Exports all commits into a temporary directory and returns the path to avoid re-exporting during the final build.

    :param str local_root: Local path to git root directory.
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.

    :return: Tempdir path with exported commits as subdirectories.
    :rtype: str
    """
    log = logging.getLogger(__name__)
    exported_root = TempDir(True).name

    # Extract all. Each unique commit sha is exported once into its own subdirectory.
    for sha in {r['sha'] for r in versions.remotes}:
        target = os.path.join(exported_root, sha)
        log.debug('Exporting %s to temporary directory.', sha)
        export(local_root, sha, target)

    # Build root. A throwaway build of the root ref to learn which file names it produces.
    remote = versions[Config.from_context().root_ref]
    with TempDir() as temp_dir:
        log.debug('Building root (before setting root_dirs) in temporary directory: %s', temp_dir)
        source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path']))
        build(source, temp_dir, versions, remote['name'], True)
        existing = os.listdir(temp_dir)

    # Define root_dir for all versions to avoid file name collisions.
    # Invalid filename characters are replaced and underscores appended until the name is unique.
    for remote in versions.remotes:
        root_dir = RE_INVALID_FILENAME.sub('_', remote['name'])
        while root_dir in existing:
            root_dir += '_'
        remote['root_dir'] = root_dir
        log.debug('%s root directory is %s', remote['name'], root_dir)
        existing.append(root_dir)

    # Get found_docs and master_doc values for all versions. Versions whose config cannot be
    # read are dropped from the build entirely.
    for remote in list(versions.remotes):
        log.debug('Partially running sphinx-build to read configuration for: %s', remote['name'])
        source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path']))
        try:
            config = read_config(source, remote['name'])
        except HandledError:
            log.warning('Skipping. Will not be building: %s', remote['name'])
            versions.remotes.pop(versions.remotes.index(remote))
            continue
        remote['found_docs'] = config['found_docs']
        remote['master_doc'] = config['master_doc']

    return exported_root
3.986949
3.719277
1.071969
def build_all(exported_root, destination, versions):
    """Build all versions.

    :param str exported_root: Tempdir path with exported commits as subdirectories.
    :param str destination: Destination directory to copy/overwrite built docs to. Does not delete old files.
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    """
    log = logging.getLogger(__name__)
    # Outer loop restarts the entire build whenever any ref fails (the failed ref is removed
    # from versions first, so every remaining version's docs get consistent version menus).
    while True:
        # Build root.
        remote = versions[Config.from_context().root_ref]
        log.info('Building root: %s', remote['name'])
        source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path']))
        build(source, destination, versions, remote['name'], True)

        # Build all refs.
        for remote in list(versions.remotes):
            log.info('Building ref: %s', remote['name'])
            source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path']))
            target = os.path.join(destination, remote['root_dir'])
            try:
                build(source, target, versions, remote['name'], False)
            except HandledError:
                log.warning('Skipping. Will not be building %s. Rebuilding everything.', remote['name'])
                versions.remotes.pop(versions.remotes.index(remote))
                break  # Break out of for loop.
        else:
            break  # All refs built cleanly: exit the while loop.
3.643949
3.609935
1.009422
def setup(app):
    """Called by Sphinx during phase 0 (initialization).

    :param sphinx.application.Sphinx app: Sphinx application object.

    :returns: Extension version.
    :rtype: dict
    """
    # Used internally. For rebuilding all pages when one or versions fail.
    app.add_config_value('sphinxcontrib_versioning_versions', SC_VERSIONING_VERSIONS, 'html')

    # Needed for banner.
    app.config.html_static_path.append(STATIC_DIR)
    app.add_stylesheet('banner.css')

    # Tell Sphinx which config values can be set by the user.
    for option, default_value in Config():
        app.add_config_value('scv_{}'.format(option), default_value, 'html')

    # Event handlers.
    registrations = (
        ('builder-inited', EventHandlers.builder_inited),
        ('env-updated', EventHandlers.env_updated),
        ('html-page-context', EventHandlers.html_page_context),
    )
    for event, callback in registrations:
        app.connect(event, callback)

    return dict(version=__version__)
4.858236
5.189642
0.936141
def _build(argv, config, versions, current_name, is_root):
    """Build Sphinx docs via multiprocessing for isolation.

    :param tuple argv: Arguments to pass to Sphinx.
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    :param str current_name: The ref name of the current version being built.
    :param bool is_root: Is this build in the web root?
    """
    # Patch Sphinx and the extension's event-handler state for this subprocess.
    application.Config = ConfigInject
    if config.show_banner:
        EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag
        EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref
        EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag
        EventHandlers.SHOW_BANNER = True
    EventHandlers.CURRENT_VERSION = current_name
    EventHandlers.IS_ROOT = is_root
    EventHandlers.VERSIONS = versions

    # Expose version metadata (minus sha/date) to the Sphinx build via the module-level list.
    pairs = list()
    for remote in versions.remotes:
        for pair in sorted(remote.items()):
            if pair[0] not in ('sha', 'date'):
                pairs.append(pair)
    SC_VERSIONING_VERSIONS[:] = pairs

    # Update argv with verbosity/color/overflow flags from the runtime config.
    if config.verbose > 1:
        argv = argv + ('-v',) * (config.verbose - 1)
    if config.no_colors:
        argv = argv + ('-N',)
    if config.overflow:
        argv = argv + config.overflow

    # Build; any non-zero exit is surfaced as a SphinxError.
    if build_main(argv) != 0:
        raise SphinxError
6.035353
5.945185
1.015167
def _read_config(argv, config, current_name, queue):
    """Read the Sphinx config via multiprocessing for isolation.

    :param tuple argv: Arguments to pass to Sphinx.
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param str current_name: The ref name of the current version being built.
    :param multiprocessing.queues.Queue queue: Communication channel to parent process.
    """
    # Patch: setting ABORT_AFTER_READ makes the build stop right after the read phase
    # and ship the discovered config back through the queue.
    EventHandlers.ABORT_AFTER_READ = queue

    # Run an isolated build with an empty Versions; only the config-read phase completes.
    _build(argv, config, Versions([]), current_name, False)
31.51399
42.488602
0.741705
def build(source, target, versions, current_name, is_root):
    """Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.

    :raise HandledError: If sphinx-build fails. Will be logged before raising.

    :param str source: Source directory to pass to sphinx-build.
    :param str target: Destination directory to write documentation to (passed to sphinx-build).
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    :param str current_name: The ref name of the current version being built.
    :param bool is_root: Is this build in the web root?
    """
    log = logging.getLogger(__name__)
    config = Config.from_context()
    argv = ('sphinx-build', source, target)
    log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))

    # Run in a child process so each version's Sphinx build is fully isolated.
    proc = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
    proc.start()
    proc.join()  # Wait for the build to finish.

    if proc.exitcode != 0:
        log.error('sphinx-build failed for branch/tag: %s', current_name)
        raise HandledError
3.841459
3.274193
1.173254