sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def fetch_all(self, R, depth=1, **kwargs): "Request multiple objects from API" d, e = self._fetcher.fetch_all(R, depth, kwargs) if e: raise e return d
Request multiple objects from API
entailment
def get(self, res, pk): "Get a resource instance by primary key (id)" B = get_backend() return B.get_object(B.get_concrete(res), pk)
Get a resource instance by primary key (id)
entailment
def all(self, res): "Get resources using a filter condition" B = get_backend() return B.get_objects(B.get_concrete(res))
Get resources using a filter condition
entailment
def run_task(func): """ Decorator to collect and return generator results, returning a list if there are multiple results """ def _wrapped(*a, **k): gen = func(*a, **k) return _consume_task(gen) return _wrapped
Decorator to collect and return generator results, returning a list if there are multiple results
entailment
def get(self, typ, id, **kwargs): """ Load type by id """ return self._load(self._request(typ, id=id, params=kwargs))
Load type by id
entailment
def _request(self, typ, id=0, method='GET', params=None, data=None, url=None): """ send the request, return response obj """ backend, backend_version = peeringdb.get_backend_info() user_agent = 'PeeringDB/{} {}/{}'.format(peeringdb.__version__, backend, backend_version) headers = { "Accept": "application/json", "User-Agent": user_agent, } auth = None if self.user: auth = (self.user, self.password) if not url: if id: url = "%s/%s/%s" % (self.url, typ, id) else: url = "%s/%s" % (self.url, typ) return requests.request(method, url, params=params, data=data, auth=auth, headers=headers)
send the request, return response obj
entailment
def wrap_generator(func): """ Decorator to convert a generator function to an async function which collects and returns generator results, returning a list if there are multiple results """ async def _wrapped(*a, **k): r, ret = None, [] gen = func(*a, **k) while True: try: item = gen.send(r) except StopIteration: break if inspect.isawaitable(item): r = await item else: r = item ret.append(r) if len(ret) == 1: return ret.pop() return ret return _wrapped
Decorator to convert a generator function to an async function which collects and returns generator results, returning a list if there are multiple results
entailment
def run_task(func): """ Decorator to wrap an async function in an event loop. Use for main sync interface methods. """ def _wrapped(*a, **k): loop = asyncio.get_event_loop() return loop.run_until_complete(func(*a, **k)) return _wrapped
Decorator to wrap an async function in an event loop. Use for main sync interface methods.
entailment
def split_ref(string): """ splits a string into (tag, id) """ re_tag = re.compile('^(?P<tag>[a-zA-Z]+)[\s-]*(?P<pk>\d+)$') m = re_tag.search(string) if not m: raise ValueError("unable to split string '%s'" % (string, )) return (m.group('tag').lower(), int(m.group('pk')))
splits a string into (tag, id)
entailment
def prompt(msg, default=None): "Prompt for input" if default is not None: msg = '{} ({})'.format(msg, repr(default)) msg = '{}: '.format(msg) try: s = input(msg) except KeyboardInterrupt: exit(1) except EOFError: s = '' if not s: s = default return s
Prompt for input
entailment
def limit_mem(limit=(4 * 1024**3)): "Set soft memory limit" rsrc = resource.RLIMIT_DATA soft, hard = resource.getrlimit(rsrc) resource.setrlimit(rsrc, (limit, hard)) # 4GB softnew, _ = resource.getrlimit(rsrc) assert softnew == limit _log = logging.getLogger(__name__) _log.debug('Set soft memory limit: %s => %s', soft, softnew)
Set soft memory limit
entailment
def render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name): """ Render model. """ inits = [i for i in inits if len(i) > 0] output = pytorch_model_template.format(**{ 'module_name': pytorch_module_name, 'module_name_lower': pytorch_module_name.lower(), 'inits': '\n'.join(inits), 'inputs': inputs, 'calls': '\n'.join(calls), 'outputs': outputs, }) if dst_dir is not None: import os import errno try: os.makedirs(dst_dir) except OSError as e: if e.errno != errno.EEXIST: raise with open(os.path.join(dst_dir, pytorch_module_name.lower() + '.py'), 'w+') as f: f.write(output) f.close() torch.save(pytorch_dict, os.path.join(dst_dir, pytorch_module_name.lower() + '.pt')) return output
Render model.
entailment
def gluon2pytorch(net, args, dst_dir, pytorch_module_name, debug=True, keep_names=False): """ Function to convert a model. """ x = [mx.nd.array(np.ones(i)) for i in args] x = net(*x) # Get network params params = net.collect_params() # Create a symbol to trace net x = [mx.sym.var('__input__' + str(i)) for i in range(len(args))] sym = net(*x) if len(sym) > 1: group = mx.sym.Group(sym) else: group = sym # Get JSON-definition of the model json_model = json.loads(group.tojson())['nodes'] # Create empty accumulators nodes = [] is_skipped = [] pytorch_dict = {} inits = [] calls = [] inputs = [] outputs = [i[0] for i in json.loads(group.tojson())['heads']] last = 0 if keep_names: names_dict = {} else: names_dict = None # Trace model for i, node in enumerate(json_model): # If the node has 'null' op, it means, that it's not a real op, but only parameter # TODO: convert constants if keep_names: names_dict[i] = node['name'] if node['op'] == 'null': if node['name'].find('__input__') == 0: inputs.append(int(node['name'][9:])) is_skipped.append(1) continue # It's not 'null' is_skipped.append(0) # Create dict with necessary node parameters op = { 'name': node['name'][:-4], 'type': node['op'], } print(op, node) if len(node['inputs']) > 0: orginal_inputs = [i for i in np.array(node['inputs'])[:, 0] if i in inputs] op['inputs'] = [i for i in np.array(node['inputs'])[:, 0] if is_skipped[i] != 1 or i in orginal_inputs] else: print(json_model) op['inputs'] = [] try: # Not all nodes have 'attrs' op['attrs'] = node['attrs'] except KeyError: op['attrs'] = {} # Debug output if debug: print(op) print('__') # Append new node to list nodes.append(op) # If operation is in available convertors, convert it if op['type'] in CONVERTERS: init_str, call_str = CONVERTERS[op['type']](i, op, nodes, params, pytorch_dict, names_dict, debug) inits.append(init_str) calls.append(call_str) else: raise AttributeError('Layer isn\'t supported') if names_dict is not None: inputs = ', '.join([names_dict[i] for i in inputs]) outputs = ', '.join([names_dict[i] for i in outputs]) else: inputs = ', '.join(['x' + str(i) for i in inputs]) outputs = ', '.join(['x' + str(i) for i in outputs]) pytorch_source = render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name) return eval_model(pytorch_source, pytorch_dict, pytorch_module_name)
Function to convert a model.
entailment
def hook_odoo(package): """ work around Odoo 10 issue https://github.com/acsone/setuptools-odoo/issues/10 # This hook should runs after all *-nspkg.pth files because it is named # zzz_ and .pth file run in alphabetical order. """ if sys.version_info.major != 2: return if package.__name__ == 'odoo': if not hasattr(package, 'release'): # Since 'release' is not in the odoo package, it means # odoo/__init__.py did not run, so what we have here is a dummy # odoo package created by setuptools' *-nspkg.pth files. # We remove it so 'import odoo' that will be done in the actual # main program will have a chance to run odoo/__init__.py. if 'odoo.addons' in sys.modules: del sys.modules['odoo.addons'] if 'odoo' in sys.modules: del sys.modules['odoo']
work around Odoo 10 issue https://github.com/acsone/setuptools-odoo/issues/10 # This hook should runs after all *-nspkg.pth files because it is named # zzz_ and .pth file run in alphabetical order.
entailment
def getAllFtpConnections(self): """ Returns a dictionary containing active ftp connections. """ outputMsg = "Current ftp connections:\n" counter = 1 for k in self.ftpList: outputMsg += str(counter) + ". " + k + " " outputMsg += str(self.ftpList[k]) + "\n" counter += 1 if self.printOutput: logger.info(outputMsg) return self.ftpList
Returns a dictionary containing active ftp connections.
entailment
def ftp_connect(self, host, user='anonymous', password='anonymous@', port=21, timeout=30, connId='default'): """ Constructs FTP object, opens a connection and login. Call this function before any other (otherwise raises exception). Returns server output. Parameters: - host - server host address - user(optional) - FTP user name. If not given, 'anonymous' is used. - password(optional) - FTP password. If not given, 'anonymous@' is used. - port(optional) - TCP port. By default 21. - timeout(optional) - timeout in seconds. By default 30. - connId(optional) - connection identifier. By default equals 'default' Examples: | ftp connect | 192.168.1.10 | mylogin | mypassword | | | | ftp connect | 192.168.1.10 | | | | | | ftp connect | 192.168.1.10 | mylogin | mypassword | connId=secondConn | | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | 20 | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | | | ftp connect | 192.168.1.10 | mylogin | mypassword | timeout=20 | | | ftp connect | 192.168.1.10 | port=29 | timeout=20 | | | """ if connId in self.ftpList: errMsg = "Connection with ID %s already exist. It should be deleted before this step." % connId raise FtpLibraryError(errMsg) else: newFtp = None outputMsg = "" try: timeout = int(timeout) port = int(port) newFtp = ftplib.FTP() outputMsg += newFtp.connect(host, port, timeout) outputMsg += newFtp.login(user,password) except socket.error as se: raise FtpLibraryError('Socket error exception occured.') except ftplib.all_errors as e: raise FtpLibraryError(str(e)) except Exception as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) self.__addNewConnection(newFtp, connId)
Constructs FTP object, opens a connection and login. Call this function before any other (otherwise raises exception). Returns server output. Parameters: - host - server host address - user(optional) - FTP user name. If not given, 'anonymous' is used. - password(optional) - FTP password. If not given, 'anonymous@' is used. - port(optional) - TCP port. By default 21. - timeout(optional) - timeout in seconds. By default 30. - connId(optional) - connection identifier. By default equals 'default' Examples: | ftp connect | 192.168.1.10 | mylogin | mypassword | | | | ftp connect | 192.168.1.10 | | | | | | ftp connect | 192.168.1.10 | mylogin | mypassword | connId=secondConn | | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | 20 | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | | | ftp connect | 192.168.1.10 | mylogin | mypassword | timeout=20 | | | ftp connect | 192.168.1.10 | port=29 | timeout=20 | | |
entailment
def get_welcome(self, connId='default'): """ Returns wlecome message of FTP server. Parameters: - connId(optional) - connection identifier. By default equals 'default' """ thisConn = self.__getConnection(connId) outputMsg = "" try: outputMsg += thisConn.getwelcome() except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
Returns wlecome message of FTP server. Parameters: - connId(optional) - connection identifier. By default equals 'default'
entailment
def dir(self, connId='default'): """ Returns list of raw lines returned as contens of current directory. Parameters: - connId(optional) - connection identifier. By default equals 'default' """ dirList = [] thisConn = self.__getConnection(connId) outputMsg = "" try: thisConn.dir(dirList.append) for d in dirList: outputMsg += str(d) + "\n" except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return dirList
Returns list of raw lines returned as contens of current directory. Parameters: - connId(optional) - connection identifier. By default equals 'default'
entailment
def dir_names(self, connId='default'): """ Returns list of files (and/or directories) of current directory. Parameters: - connId(optional) - connection identifier. By default equals 'default' """ files_list = [] thisConn = self.__getConnection(connId) try: files_list = thisConn.nlst() except: files_list = [] return files_list
Returns list of files (and/or directories) of current directory. Parameters: - connId(optional) - connection identifier. By default equals 'default'
entailment
def download_file(self, remoteFileName, localFilePath=None, connId='default'): """ Downloads file from current directory on FTP server in binary mode. If localFilePath is not given, file is saved in current local directory (by default folder containing robot framework project file) with the same name as source file. Returns server output Parameters: - remoteFileName - file name on FTP server - localFilePath (optional) - local file name or path where remote file should be saved. - connId(optional) - connection identifier. By default equals 'default' localFilePath variable can have following meanings: 1. file name (will be saved in current default directory); 2. full path (dir + file name) 3. dir path (original file name will be added) Examples: | download file | a.txt | | | | download file | a.txt | b.txt | connId=ftp1 | | download file | a.txt | D:/rfftppy/tmp | | | download file | a.txt | D:/rfftppy/tmp/b.txt | | | download file | a.txt | D:\\rfftppy\\tmp\\c.txt | | """ thisConn = self.__getConnection(connId) outputMsg = "" localPath = "" if localFilePath == None: localPath = remoteFileName else: localPath = os.path.normpath(localFilePath) if os.path.isdir(localPath): localPath = os.path.join(localPath, remoteFileName) try: with open(localPath, 'wb') as localFile: outputMsg += thisConn.retrbinary("RETR " + remoteFileName, localFile.write) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
Downloads file from current directory on FTP server in binary mode. If localFilePath is not given, file is saved in current local directory (by default folder containing robot framework project file) with the same name as source file. Returns server output Parameters: - remoteFileName - file name on FTP server - localFilePath (optional) - local file name or path where remote file should be saved. - connId(optional) - connection identifier. By default equals 'default' localFilePath variable can have following meanings: 1. file name (will be saved in current default directory); 2. full path (dir + file name) 3. dir path (original file name will be added) Examples: | download file | a.txt | | | | download file | a.txt | b.txt | connId=ftp1 | | download file | a.txt | D:/rfftppy/tmp | | | download file | a.txt | D:/rfftppy/tmp/b.txt | | | download file | a.txt | D:\\rfftppy\\tmp\\c.txt | |
entailment
def upload_file(self, localFileName, remoteFileName=None, connId='default'): """ Sends file from local drive to current directory on FTP server in binary mode. Returns server output. Parameters: - localFileName - file name or path to a file on a local drive. - remoteFileName (optional) - a name or path containing name under which file should be saved. - connId(optional) - connection identifier. By default equals 'default' If remoteFileName agument is not given, local name will be used. Examples: | upload file | x.txt | connId=ftp1 | | upload file | D:/rfftppy/y.txt | | | upload file | u.txt | uu.txt | | upload file | D:/rfftppy/z.txt | zz.txt | | upload file | D:\\rfftppy\\v.txt | | """ thisConn = self.__getConnection(connId) outputMsg = "" remoteFileName_ = "" localFilePath = os.path.normpath(localFileName) if not os.path.isfile(localFilePath): raise FtpLibraryError("Valid file path should be provided.") else: if remoteFileName==None: fileTuple = os.path.split(localFileName) if len(fileTuple)==2: remoteFileName_ = fileTuple[1] else: remoteFileName_ = 'defaultFileName' else: remoteFileName_ = remoteFileName try: outputMsg += thisConn.storbinary("STOR " + remoteFileName_, open(localFilePath, "rb")) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
Sends file from local drive to current directory on FTP server in binary mode. Returns server output. Parameters: - localFileName - file name or path to a file on a local drive. - remoteFileName (optional) - a name or path containing name under which file should be saved. - connId(optional) - connection identifier. By default equals 'default' If remoteFileName agument is not given, local name will be used. Examples: | upload file | x.txt | connId=ftp1 | | upload file | D:/rfftppy/y.txt | | | upload file | u.txt | uu.txt | | upload file | D:/rfftppy/z.txt | zz.txt | | upload file | D:\\rfftppy\\v.txt | |
entailment
def size(self, fileToCheck, connId='default'): """ Checks size of a file on FTP server. Returns size of a file in bytes (integer). Parameters: - fileToCheck - file name or path to a file on FTP server - connId(optional) - connection identifier. By default equals 'default' Example: | ${file1size} = | size | /home/myname/tmp/uu.txt | connId=ftp1 | | Should Be Equal As Numbers | ${file1size} | 31 | | Note that the SIZE command is not standardized, but is supported by many common server implementations. """ thisConn = self.__getConnection(connId) outputMsg = "" try: tmpSize = thisConn.size(fileToCheck) outputMsg += str(tmpSize) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
Checks size of a file on FTP server. Returns size of a file in bytes (integer). Parameters: - fileToCheck - file name or path to a file on FTP server - connId(optional) - connection identifier. By default equals 'default' Example: | ${file1size} = | size | /home/myname/tmp/uu.txt | connId=ftp1 | | Should Be Equal As Numbers | ${file1size} | 31 | | Note that the SIZE command is not standardized, but is supported by many common server implementations.
entailment
def send_cmd(self, command, connId='default'): """ Sends any command to FTP server. Returns server output. Parameters: - command - any valid command to be sent (invalid will result in exception). - connId(optional) - connection identifier. By default equals 'default' Example: | send cmd | HELP | """ thisConn = self.__getConnection(connId) outputMsg = "" try: outputMsg += str(thisConn.sendcmd(command)) except ftplib.all_errors as e: raise FtpLibraryError(str(e)) if self.printOutput: logger.info(outputMsg) return outputMsg
Sends any command to FTP server. Returns server output. Parameters: - command - any valid command to be sent (invalid will result in exception). - connId(optional) - connection identifier. By default equals 'default' Example: | send cmd | HELP |
entailment
def ftp_close(self, connId='default'): """ Closes FTP connection. Returns None. Parameters: - connId(optional) - connection identifier. By default equals 'default' """ thisConn = self.__getConnection(connId) try: thisConn.quit() self.__removeConnection(connId) except Exception as e: try: thisConn.close() self.__removeConnection(connId) except ftplib.all_errors as x: raise FtpLibraryError(str(x))
Closes FTP connection. Returns None. Parameters: - connId(optional) - connection identifier. By default equals 'default'
entailment
def setup_logging(verbose=0, colors=False, name=None): """Configure console logging. Info and below go to stdout, others go to stderr. :param int verbose: Verbosity level. > 0 print debug statements. > 1 passed to sphinx-build. :param bool colors: Print color text in non-verbose mode. :param str name: Which logger name to set handlers to. Used for testing. """ root_logger = logging.getLogger(name) root_logger.setLevel(logging.DEBUG if verbose > 0 else logging.INFO) formatter = ColorFormatter(verbose > 0, colors) if colors: colorclass.Windows.enable() handler_stdout = logging.StreamHandler(sys.stdout) handler_stdout.setFormatter(formatter) handler_stdout.setLevel(logging.DEBUG) handler_stdout.addFilter(type('', (logging.Filter,), {'filter': staticmethod(lambda r: r.levelno <= logging.INFO)})) root_logger.addHandler(handler_stdout) handler_stderr = logging.StreamHandler(sys.stderr) handler_stderr.setFormatter(formatter) handler_stderr.setLevel(logging.WARNING) root_logger.addHandler(handler_stderr)
Configure console logging. Info and below go to stdout, others go to stderr. :param int verbose: Verbosity level. > 0 print debug statements. > 1 passed to sphinx-build. :param bool colors: Print color text in non-verbose mode. :param str name: Which logger name to set handlers to. Used for testing.
entailment
def format(self, record): """Apply little arrow and colors to the record. Arrow and colors are only applied to sphinxcontrib.versioning log statements. :param logging.LogRecord record: The log record object to log. """ formatted = super(ColorFormatter, self).format(record) if self.verbose or not record.name.startswith(self.SPECIAL_SCOPE): return formatted # Arrow. formatted = '=> ' + formatted # Colors. if not self.colors: return formatted if record.levelno >= logging.ERROR: formatted = str(colorclass.Color.red(formatted)) elif record.levelno >= logging.WARNING: formatted = str(colorclass.Color.yellow(formatted)) else: formatted = str(colorclass.Color.cyan(formatted)) return formatted
Apply little arrow and colors to the record. Arrow and colors are only applied to sphinxcontrib.versioning log statements. :param logging.LogRecord record: The log record object to log.
entailment
def chunk(iterator, max_size): """Chunk a list/set/etc. :param iter iterator: The iterable object to chunk. :param int max_size: Max size of each chunk. Remainder chunk may be smaller. :return: Yield list of items. :rtype: iter """ gen = iter(iterator) while True: chunked = list() for i, item in enumerate(gen): chunked.append(item) if i >= max_size - 1: break if not chunked: return yield chunked
Chunk a list/set/etc. :param iter iterator: The iterable object to chunk. :param int max_size: Max size of each chunk. Remainder chunk may be smaller. :return: Yield list of items. :rtype: iter
entailment
def run_command(local_root, command, env_var=True, pipeto=None, retry=0, environ=None): """Run a command and return the output. :raise CalledProcessError: Command exits non-zero. :param str local_root: Local path to git root directory. :param iter command: Command to run. :param dict environ: Environment variables to set/override in the command. :param bool env_var: Define GIT_DIR environment variable (on non-Windows). :param function pipeto: Pipe `command`'s stdout to this function (only parameter given). :param int retry: Retry this many times on CalledProcessError after 0.1 seconds. :return: Command output. :rtype: str """ log = logging.getLogger(__name__) # Setup env. env = os.environ.copy() if environ: env.update(environ) if env_var and not IS_WINDOWS: env['GIT_DIR'] = os.path.join(local_root, '.git') else: env.pop('GIT_DIR', None) # Run command. with open(os.devnull) as null: main = Popen(command, cwd=local_root, env=env, stdout=PIPE, stderr=PIPE if pipeto else STDOUT, stdin=null) if pipeto: pipeto(main.stdout) main_output = main.communicate()[1].decode('utf-8') # Might deadlock if stderr is written to a lot. else: main_output = main.communicate()[0].decode('utf-8') log.debug(json.dumps(dict(cwd=local_root, command=command, code=main.poll(), output=main_output))) # Verify success. if main.poll() != 0: if retry < 1: raise CalledProcessError(main.poll(), command, output=main_output) time.sleep(0.1) return run_command(local_root, command, env_var, pipeto, retry - 1) return main_output
Run a command and return the output. :raise CalledProcessError: Command exits non-zero. :param str local_root: Local path to git root directory. :param iter command: Command to run. :param dict environ: Environment variables to set/override in the command. :param bool env_var: Define GIT_DIR environment variable (on non-Windows). :param function pipeto: Pipe `command`'s stdout to this function (only parameter given). :param int retry: Retry this many times on CalledProcessError after 0.1 seconds. :return: Command output. :rtype: str
entailment
def get_root(directory): """Get root directory of the local git repo from any subdirectory within it. :raise GitError: If git command fails (dir not a git repo?). :param str directory: Subdirectory in the local repo. :return: Root directory of repository. :rtype: str """ command = ['git', 'rev-parse', '--show-toplevel'] try: output = run_command(directory, command, env_var=False) except CalledProcessError as exc: raise GitError('Failed to find local git repository root in {}.'.format(repr(directory)), exc.output) if IS_WINDOWS: output = output.replace('/', '\\') return output.strip()
Get root directory of the local git repo from any subdirectory within it. :raise GitError: If git command fails (dir not a git repo?). :param str directory: Subdirectory in the local repo. :return: Root directory of repository. :rtype: str
entailment
def list_remote(local_root): """Get remote branch/tag latest SHAs. :raise GitError: When git ls-remote fails. :param str local_root: Local path to git root directory. :return: List of tuples containing strings. Each tuple is sha, name, kind. :rtype: list """ command = ['git', 'ls-remote', '--heads', '--tags'] try: output = run_command(local_root, command) except CalledProcessError as exc: raise GitError('Git failed to list remote refs.', exc.output) # Dereference annotated tags if any. No need to fetch annotations. if '^{}' in output: parsed = list() for group in (m.groupdict() for m in RE_REMOTE.finditer(output)): dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind'] if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']: parsed[-1]['sha'] = group['sha'] else: parsed.append(group) else: parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)] return [[i['sha'], i['name'], i['kind']] for i in parsed]
Get remote branch/tag latest SHAs. :raise GitError: When git ls-remote fails. :param str local_root: Local path to git root directory. :return: List of tuples containing strings. Each tuple is sha, name, kind. :rtype: list
entailment
def filter_and_date(local_root, conf_rel_paths, commits): """Get commit Unix timestamps and first matching conf.py path. Exclude commits with no conf.py file. :raise CalledProcessError: Unhandled git command failure. :raise GitError: A commit SHA has not been fetched. :param str local_root: Local path to git root directory. :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py). :param iter commits: List of commit SHAs. :return: Commit time (seconds since Unix epoch) for each commit and conf.py path. SHA keys and [int, str] values. :rtype: dict """ dates_paths = dict() # Filter without docs. for commit in commits: if commit in dates_paths: continue command = ['git', 'ls-tree', '--name-only', '-r', commit] + conf_rel_paths try: output = run_command(local_root, command) except CalledProcessError as exc: raise GitError('Git ls-tree failed on {0}'.format(commit), exc.output) if output: dates_paths[commit] = [None, output.splitlines()[0].strip()] # Get timestamps by groups of 50. command_prefix = ['git', 'show', '--no-patch', '--pretty=format:%ct'] for commits_group in chunk(dates_paths, 50): command = command_prefix + commits_group output = run_command(local_root, command) timestamps = [int(i) for i in RE_UNIX_TIME.findall(output)] for i, commit in enumerate(commits_group): dates_paths[commit][0] = timestamps[i] # Done. return dates_paths
Get commit Unix timestamps and first matching conf.py path. Exclude commits with no conf.py file. :raise CalledProcessError: Unhandled git command failure. :raise GitError: A commit SHA has not been fetched. :param str local_root: Local path to git root directory. :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py). :param iter commits: List of commit SHAs. :return: Commit time (seconds since Unix epoch) for each commit and conf.py path. SHA keys and [int, str] values. :rtype: dict
entailment
def fetch_commits(local_root, remotes): """Fetch from origin. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param iter remotes: Output of list_remote(). """ # Fetch all known branches. command = ['git', 'fetch', 'origin'] run_command(local_root, command) # Fetch new branches/tags. for sha, name, kind in remotes: try: run_command(local_root, ['git', 'reflog', sha]) except CalledProcessError: run_command(local_root, command + ['refs/{0}/{1}'.format(kind, name)]) run_command(local_root, ['git', 'reflog', sha])
Fetch from origin. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param iter remotes: Output of list_remote().
entailment
def export(local_root, commit, target): """Export git commit to directory. "Extracts" all files at the commit to the target directory. Set mtime of RST files to last commit date. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param str commit: Git commit SHA to export. :param str target: Directory to export to. """ log = logging.getLogger(__name__) target = os.path.realpath(target) mtimes = list() # Define extract function. def extract(stdout): """Extract tar archive from "git archive" stdout. :param file stdout: Handle to git's stdout pipe. """ queued_links = list() try: with tarfile.open(fileobj=stdout, mode='r|') as tar: for info in tar: log.debug('name: %s; mode: %d; size: %s; type: %s', info.name, info.mode, info.size, info.type) path = os.path.realpath(os.path.join(target, info.name)) if not path.startswith(target): # Handle bad paths. log.warning('Ignoring tar object path %s outside of target directory.', info.name) elif info.isdir(): # Handle directories. if not os.path.exists(path): os.makedirs(path, mode=info.mode) elif info.issym() or info.islnk(): # Queue links. queued_links.append(info) else: # Handle files. tar.extract(member=info, path=target) if os.path.splitext(info.name)[1].lower() == '.rst': mtimes.append(info.name) for info in (i for i in queued_links if os.path.exists(os.path.join(target, i.linkname))): tar.extract(member=info, path=target) except tarfile.TarError as exc: log.debug('Failed to extract output from "git archive" command: %s', str(exc)) # Run command. run_command(local_root, ['git', 'archive', '--format=tar', commit], pipeto=extract) # Set mtime. for file_path in mtimes: last_committed = int(run_command(local_root, ['git', 'log', '-n1', '--format=%at', commit, '--', file_path])) os.utime(os.path.join(target, file_path), (last_committed, last_committed))
Export git commit to directory. "Extracts" all files at the commit to the target directory. Set mtime of RST files to last commit date. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param str commit: Git commit SHA to export. :param str target: Directory to export to.
entailment
def clone(local_root, new_root, remote, branch, rel_dest, exclude): """Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm". :raise CalledProcessError: Unhandled git command failure. :raise GitError: Handled git failures. :param str local_root: Local path to git root directory. :param str new_root: Local path empty directory in which branch will be cloned into. :param str remote: The git remote to clone from to. :param str branch: Checkout this branch. :param str rel_dest: Run "git rm" on this directory if exclude is truthy. :param iter exclude: List of strings representing relative file paths to exclude from "git rm". """ log = logging.getLogger(__name__) output = run_command(local_root, ['git', 'remote', '-v']) remotes = dict() for match in RE_ALL_REMOTES.findall(output): remotes.setdefault(match[0], [None, None]) if match[2] == 'fetch': remotes[match[0]][0] = match[1] else: remotes[match[0]][1] = match[1] if not remotes: raise GitError('Git repo has no remotes.', output) if remote not in remotes: raise GitError('Git repo missing remote "{}".'.format(remote), output) # Clone. try: run_command(new_root, ['git', 'clone', remotes[remote][0], '--depth=1', '--branch', branch, '.']) except CalledProcessError as exc: raise GitError('Failed to clone from remote repo URL.', exc.output) # Make sure user didn't select a tag as their DEST_BRANCH. try: run_command(new_root, ['git', 'symbolic-ref', 'HEAD']) except CalledProcessError as exc: raise GitError('Specified branch is not a real branch.', exc.output) # Copy all remotes from original repo. for name, (fetch, push) in remotes.items(): try: run_command(new_root, ['git', 'remote', 'set-url' if name == 'origin' else 'add', name, fetch], retry=3) run_command(new_root, ['git', 'remote', 'set-url', '--push', name, push], retry=3) except CalledProcessError as exc: raise GitError('Failed to set git remote URL.', exc.output) # Done if no exclude. if not exclude: return # Resolve exclude paths. exclude_joined = [ os.path.relpath(p, new_root) for e in exclude for p in glob.glob(os.path.join(new_root, rel_dest, e)) ] log.debug('Expanded %s to %s', repr(exclude), repr(exclude_joined)) # Do "git rm". try: run_command(new_root, ['git', 'rm', '-rf', rel_dest]) except CalledProcessError as exc: raise GitError('"git rm" failed to remove ' + rel_dest, exc.output) # Restore files in exclude. run_command(new_root, ['git', 'reset', 'HEAD'] + exclude_joined) run_command(new_root, ['git', 'checkout', '--'] + exclude_joined)
Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm". :raise CalledProcessError: Unhandled git command failure. :raise GitError: Handled git failures. :param str local_root: Local path to git root directory. :param str new_root: Local path empty directory in which branch will be cloned into. :param str remote: The git remote to clone from to. :param str branch: Checkout this branch. :param str rel_dest: Run "git rm" on this directory if exclude is truthy. :param iter exclude: List of strings representing relative file paths to exclude from "git rm".
entailment
def commit_and_push(local_root, remote, versions): """Commit changed, new, and deleted files in the repo and attempt to push the branch to the remote repository. :raise CalledProcessError: Unhandled git command failure. :raise GitError: Conflicting changes made in remote by other client and bad git config for commits. :param str local_root: Local path to git root directory. :param str remote: The git remote to push to. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :return: If push succeeded. :rtype: bool """ log = logging.getLogger(__name__) current_branch = run_command(local_root, ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip() run_command(local_root, ['git', 'add', '.']) # Check if there are no changes. try: run_command(local_root, ['git', 'diff', 'HEAD', '--no-ext-diff', '--quiet', '--exit-code']) except CalledProcessError: pass # Repo is dirty, something has changed. else: log.info('No changes to commit.') return True # Check if there are changes excluding those files that always change. output = run_command(local_root, ['git', 'diff', 'HEAD', '--no-ext-diff', '--name-status']) for status, name in (l.split('\t', 1) for l in output.splitlines()): if status != 'M': break # Only looking for modified files. components = name.split('/') if '.doctrees' not in components and components[-1] != 'searchindex.js': break # Something other than those two dirs/files has changed. else: log.info('No significant changes to commit.') return True # Commit. latest_commit = sorted(versions.remotes, key=lambda v: v['date'])[-1] commit_message_file = os.path.join(local_root, '_scv_commit_message.txt') with open(commit_message_file, 'w') as handle: handle.write('AUTO sphinxcontrib-versioning {} {}\n\n'.format( datetime.utcfromtimestamp(latest_commit['date']).strftime('%Y%m%d'), latest_commit['sha'][:11], )) for line in ('{}: {}\n'.format(v, os.environ[v]) for v in WHITELIST_ENV_VARS if v in os.environ): handle.write(line) try: run_command(local_root, ['git', 'commit', '-F', commit_message_file]) except CalledProcessError as exc: raise GitError('Failed to commit locally.', exc.output) os.remove(commit_message_file) # Push. try: run_command(local_root, ['git', 'push', remote, current_branch]) except CalledProcessError as exc: if '[rejected]' in exc.output and '(fetch first)' in exc.output: log.debug('Remote has changed since cloning the repo. Must retry.') return False raise GitError('Failed to push to remote.', exc.output) log.info('Successfully pushed to remote repository.') return True
Commit changed, new, and deleted files in the repo and attempt to push the branch to the remote repository. :raise CalledProcessError: Unhandled git command failure. :raise GitError: Conflicting changes made in remote by other client and bad git config for commits. :param str local_root: Local path to git root directory. :param str remote: The git remote to push to. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :return: If push succeeded. :rtype: bool
entailment
def semvers(names): """Parse versions into integers and convert non-integer meta indicators into integers with ord(). Each return list item has an indicator as the first item. 0 for valid versions and 1 for invalid. Can be used to sort non-version names (e.g. master, feature_branch, etc) after valid versions. No sorting is done in this function though. Read multi_sort() docstring for reasoning behind inverted integers in version_ints variable. :param iter names: List of strings representing versions/tags/branches. :return: List of parsed versions. E.g. v1.10.0b3 -> [0, 1, 10, 0, ord('b'), ord('3')] :rtype: list """ matches = [(RE_SEMVER.findall(n) or [[]])[0] for n in names] max_len_ints = 0 max_len_str = 0 # Get max lens for padding. for match in (m for m in matches if m): max_len_ints = len(match) # Never changes. max_len_str = max(max_len_str, len(match[-1])) if not max_len_ints: return matches # Nothing to do, all empty. invalid_template = [1] + [0] * (max_len_ints + max_len_str - 1) # Parse. exploded_semver = list() for match in matches: if not match: exploded_semver.append(invalid_template[:]) continue version_ints = [-int(i or 0) for i in match[:-1]] ints_of_str = [ord(i) for i in match[-1]] + [0] * (max_len_str - len(match[-1])) exploded_semver.append([0] + version_ints + ints_of_str) return exploded_semver
Parse versions into integers and convert non-integer meta indicators into integers with ord(). Each return list item has an indicator as the first item. 0 for valid versions and 1 for invalid. Can be used to sort non-version names (e.g. master, feature_branch, etc) after valid versions. No sorting is done in this function though. Read multi_sort() docstring for reasoning behind inverted integers in version_ints variable. :param iter names: List of strings representing versions/tags/branches. :return: List of parsed versions. E.g. v1.10.0b3 -> [0, 1, 10, 0, ord('b'), ord('3')] :rtype: list
entailment
def multi_sort(remotes, sort): """Sort `remotes` in place. Allows sorting by multiple conditions. This is needed because Python 3 no longer supports sorting lists of multiple types. Sort keys must all be of the same type. Problem: the user expects versions to be sorted latest first and timelogical to be most recent first (when viewing the HTML documentation), yet expects alphabetical sorting to be A before Z. Solution: invert integers (dates and parsed versions). :param iter remotes: List of dicts from Versions().remotes. :param iter sort: What to sort by. May be one or more of: alpha, time, semver """ exploded_alpha = list() exploded_semver = list() # Convert name to int if alpha is in sort. if 'alpha' in sort: alpha_max_len = max(len(r['name']) for r in remotes) for name in (r['name'] for r in remotes): exploded_alpha.append([ord(i) for i in name] + [0] * (alpha_max_len - len(name))) # Parse versions if semver is in sort. if 'semver' in sort: exploded_semver = semvers(r['name'] for r in remotes) # Build sort_mapping dict. sort_mapping = dict() for i, remote in enumerate(remotes): key = list() for sort_by in sort: if sort_by == 'alpha': key.extend(exploded_alpha[i]) elif sort_by == 'time': key.append(-remote['date']) elif sort_by == 'semver': key.extend(exploded_semver[i]) sort_mapping[id(remote)] = key # Sort. remotes.sort(key=lambda k: sort_mapping.get(id(k)))
Sort `remotes` in place. Allows sorting by multiple conditions. This is needed because Python 3 no longer supports sorting lists of multiple types. Sort keys must all be of the same type. Problem: the user expects versions to be sorted latest first and timelogical to be most recent first (when viewing the HTML documentation), yet expects alphabetical sorting to be A before Z. Solution: invert integers (dates and parsed versions). :param iter remotes: List of dicts from Versions().remotes. :param iter sort: What to sort by. May be one or more of: alpha, time, semver
entailment
def branches(self): """Return list of (name and urls) only branches.""" return [(r['name'], self.vpathto(r['name'])) for r in self.remotes if r['kind'] == 'heads']
Return list of (name and urls) only branches.
entailment
def tags(self): """Return list of (name and urls) only tags.""" return [(r['name'], self.vpathto(r['name'])) for r in self.remotes if r['kind'] == 'tags']
Return list of (name and urls) only tags.
entailment
def vhasdoc(self, other_version): """Return True if the other version has the current document. Like Sphinx's hasdoc(). :raise KeyError: If other_version doesn't exist. :param str other_version: Version to link to. :return: If current document is in the other version. :rtype: bool """ if self.context['current_version'] == other_version: return True return self.context['pagename'] in self[other_version]['found_docs']
Return True if the other version has the current document. Like Sphinx's hasdoc(). :raise KeyError: If other_version doesn't exist. :param str other_version: Version to link to. :return: If current document is in the other version. :rtype: bool
entailment
def vpathto(self, other_version): """Return relative path to current document in another version. Like Sphinx's pathto(). If the current document doesn't exist in the other version its master_doc path is returned instead. :raise KeyError: If other_version doesn't exist. :param str other_version: Version to link to. :return: Relative path. :rtype: str """ is_root = self.context['scv_is_root'] pagename = self.context['pagename'] if self.context['current_version'] == other_version and not is_root: return '{}.html'.format(pagename.split('/')[-1]) other_remote = self[other_version] other_root_dir = other_remote['root_dir'] components = ['..'] * pagename.count('/') components += [other_root_dir] if is_root else ['..', other_root_dir] components += [pagename if self.vhasdoc(other_version) else other_remote['master_doc']] return '{}.html'.format(__import__('posixpath').join(*components))
Return relative path to current document in another version. Like Sphinx's pathto(). If the current document doesn't exist in the other version its master_doc path is returned instead. :raise KeyError: If other_version doesn't exist. :param str other_version: Version to link to. :return: Relative path. :rtype: str
entailment
def read_local_conf(local_conf): """Search for conf.py in any rel_source directory in CWD and if found read it and return. :param str local_conf: Path to conf.py to read. :return: Loaded conf.py. :rtype: dict """ log = logging.getLogger(__name__) # Attempt to read. log.info('Reading config from %s...', local_conf) try: config = read_config(os.path.dirname(local_conf), '<local>') except HandledError: log.warning('Unable to read file, continuing with only CLI args.') return dict() # Filter and return. return {k[4:]: v for k, v in config.items() if k.startswith('scv_') and not k[4:].startswith('_')}
Search for conf.py in any rel_source directory in CWD and if found read it and return. :param str local_conf: Path to conf.py to read. :return: Loaded conf.py. :rtype: dict
entailment
def gather_git_info(root, conf_rel_paths, whitelist_branches, whitelist_tags): """Gather info about the remote git repository. Get list of refs. :raise HandledError: If function fails with a handled error. Will be logged before raising. :param str root: Root directory of repository. :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py). :param iter whitelist_branches: Optional list of patterns to filter branches by. :param iter whitelist_tags: Optional list of patterns to filter tags by. :return: Commits with docs. A list of tuples: (sha, name, kind, date, conf_rel_path). :rtype: list """ log = logging.getLogger(__name__) # List remote. log.info('Getting list of all remote branches/tags...') try: remotes = list_remote(root) except GitError as exc: log.error(exc.message) log.error(exc.output) raise HandledError log.info('Found: %s', ' '.join(i[1] for i in remotes)) # Filter and date. try: try: dates_paths = filter_and_date(root, conf_rel_paths, (i[0] for i in remotes)) except GitError: log.info('Need to fetch from remote...') fetch_commits(root, remotes) try: dates_paths = filter_and_date(root, conf_rel_paths, (i[0] for i in remotes)) except GitError as exc: log.error(exc.message) log.error(exc.output) raise HandledError except subprocess.CalledProcessError as exc: log.debug(json.dumps(dict(command=exc.cmd, cwd=root, code=exc.returncode, output=exc.output))) log.error('Failed to get dates for all remote commits.') raise HandledError filtered_remotes = [[i[0], i[1], i[2], ] + dates_paths[i[0]] for i in remotes if i[0] in dates_paths] log.info('With docs: %s', ' '.join(i[1] for i in filtered_remotes)) if not whitelist_branches and not whitelist_tags: return filtered_remotes # Apply whitelist. whitelisted_remotes = list() for remote in filtered_remotes: if remote[2] == 'heads' and whitelist_branches: if not any(re.search(p, remote[1]) for p in whitelist_branches): continue if remote[2] == 'tags' and whitelist_tags: if not any(re.search(p, remote[1]) for p in whitelist_tags): continue whitelisted_remotes.append(remote) log.info('Passed whitelisting: %s', ' '.join(i[1] for i in whitelisted_remotes)) return whitelisted_remotes
Gather info about the remote git repository. Get list of refs. :raise HandledError: If function fails with a handled error. Will be logged before raising. :param str root: Root directory of repository. :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py). :param iter whitelist_branches: Optional list of patterns to filter branches by. :param iter whitelist_tags: Optional list of patterns to filter tags by. :return: Commits with docs. A list of tuples: (sha, name, kind, date, conf_rel_path). :rtype: list
entailment
def pre_build(local_root, versions): """Build docs for all versions to determine root directory and master_doc names. Need to build docs to (a) avoid filename collision with files from root_ref and branch/tag names and (b) determine master_doc config values for all versions (in case master_doc changes from e.g. contents.rst to index.rst between versions). Exports all commits into a temporary directory and returns the path to avoid re-exporting during the final build. :param str local_root: Local path to git root directory. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :return: Tempdir path with exported commits as subdirectories. :rtype: str """ log = logging.getLogger(__name__) exported_root = TempDir(True).name # Extract all. for sha in {r['sha'] for r in versions.remotes}: target = os.path.join(exported_root, sha) log.debug('Exporting %s to temporary directory.', sha) export(local_root, sha, target) # Build root. remote = versions[Config.from_context().root_ref] with TempDir() as temp_dir: log.debug('Building root (before setting root_dirs) in temporary directory: %s', temp_dir) source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path'])) build(source, temp_dir, versions, remote['name'], True) existing = os.listdir(temp_dir) # Define root_dir for all versions to avoid file name collisions. for remote in versions.remotes: root_dir = RE_INVALID_FILENAME.sub('_', remote['name']) while root_dir in existing: root_dir += '_' remote['root_dir'] = root_dir log.debug('%s root directory is %s', remote['name'], root_dir) existing.append(root_dir) # Get found_docs and master_doc values for all versions. for remote in list(versions.remotes): log.debug('Partially running sphinx-build to read configuration for: %s', remote['name']) source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path'])) try: config = read_config(source, remote['name']) except HandledError: log.warning('Skipping. Will not be building: %s', remote['name']) versions.remotes.pop(versions.remotes.index(remote)) continue remote['found_docs'] = config['found_docs'] remote['master_doc'] = config['master_doc'] return exported_root
Build docs for all versions to determine root directory and master_doc names. Need to build docs to (a) avoid filename collision with files from root_ref and branch/tag names and (b) determine master_doc config values for all versions (in case master_doc changes from e.g. contents.rst to index.rst between versions). Exports all commits into a temporary directory and returns the path to avoid re-exporting during the final build. :param str local_root: Local path to git root directory. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :return: Tempdir path with exported commits as subdirectories. :rtype: str
entailment
def build_all(exported_root, destination, versions): """Build all versions. :param str exported_root: Tempdir path with exported commits as subdirectories. :param str destination: Destination directory to copy/overwrite built docs to. Does not delete old files. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. """ log = logging.getLogger(__name__) while True: # Build root. remote = versions[Config.from_context().root_ref] log.info('Building root: %s', remote['name']) source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path'])) build(source, destination, versions, remote['name'], True) # Build all refs. for remote in list(versions.remotes): log.info('Building ref: %s', remote['name']) source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path'])) target = os.path.join(destination, remote['root_dir']) try: build(source, target, versions, remote['name'], False) except HandledError: log.warning('Skipping. Will not be building %s. Rebuilding everything.', remote['name']) versions.remotes.pop(versions.remotes.index(remote)) break # Break out of for loop. else: break
Build all versions. :param str exported_root: Tempdir path with exported commits as subdirectories. :param str destination: Destination directory to copy/overwrite built docs to. Does not delete old files. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
entailment
def setup(app): """Called by Sphinx during phase 0 (initialization). :param sphinx.application.Sphinx app: Sphinx application object. :returns: Extension version. :rtype: dict """ # Used internally. For rebuilding all pages when one or versions fail. app.add_config_value('sphinxcontrib_versioning_versions', SC_VERSIONING_VERSIONS, 'html') # Needed for banner. app.config.html_static_path.append(STATIC_DIR) app.add_stylesheet('banner.css') # Tell Sphinx which config values can be set by the user. for name, default in Config(): app.add_config_value('scv_{}'.format(name), default, 'html') # Event handlers. app.connect('builder-inited', EventHandlers.builder_inited) app.connect('env-updated', EventHandlers.env_updated) app.connect('html-page-context', EventHandlers.html_page_context) return dict(version=__version__)
Called by Sphinx during phase 0 (initialization). :param sphinx.application.Sphinx app: Sphinx application object. :returns: Extension version. :rtype: dict
entailment
def _build(argv, config, versions, current_name, is_root): """Build Sphinx docs via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root? """ # Patch. application.Config = ConfigInject if config.show_banner: EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag EventHandlers.SHOW_BANNER = True EventHandlers.CURRENT_VERSION = current_name EventHandlers.IS_ROOT = is_root EventHandlers.VERSIONS = versions SC_VERSIONING_VERSIONS[:] = [p for r in versions.remotes for p in sorted(r.items()) if p[0] not in ('sha', 'date')] # Update argv. if config.verbose > 1: argv += ('-v',) * (config.verbose - 1) if config.no_colors: argv += ('-N',) if config.overflow: argv += config.overflow # Build. result = build_main(argv) if result != 0: raise SphinxError
Build Sphinx docs via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root?
entailment
def _read_config(argv, config, current_name, queue): """Read the Sphinx config via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param str current_name: The ref name of the current version being built. :param multiprocessing.queues.Queue queue: Communication channel to parent process. """ # Patch. EventHandlers.ABORT_AFTER_READ = queue # Run. _build(argv, config, Versions(list()), current_name, False)
Read the Sphinx config via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param str current_name: The ref name of the current version being built. :param multiprocessing.queues.Queue queue: Communication channel to parent process.
entailment
def build(source, target, versions, current_name, is_root): """Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context. :raise HandledError: If sphinx-build fails. Will be logged before raising. :param str source: Source directory to pass to sphinx-build. :param str target: Destination directory to write documentation to (passed to sphinx-build). :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root? """ log = logging.getLogger(__name__) argv = ('sphinx-build', source, target) config = Config.from_context() log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv)) child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root)) child.start() child.join() # Block. if child.exitcode != 0: log.error('sphinx-build failed for branch/tag: %s', current_name) raise HandledError
Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context. :raise HandledError: If sphinx-build fails. Will be logged before raising. :param str source: Source directory to pass to sphinx-build. :param str target: Destination directory to write documentation to (passed to sphinx-build). :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root?
entailment
def read_config(source, current_name): """Read the Sphinx config for one version. :raise HandledError: If sphinx-build fails. Will be logged before raising. :param str source: Source directory to pass to sphinx-build. :param str current_name: The ref name of the current version being built. :return: Specific Sphinx config values. :rtype: dict """ log = logging.getLogger(__name__) queue = multiprocessing.Queue() config = Config.from_context() with TempDir() as temp_dir: argv = ('sphinx-build', source, temp_dir) log.debug('Running sphinx-build for config values with args: %s', str(argv)) child = multiprocessing.Process(target=_read_config, args=(argv, config, current_name, queue)) child.start() child.join() # Block. if child.exitcode != 0: log.error('sphinx-build failed for branch/tag while reading config: %s', current_name) raise HandledError config = queue.get() return config
Read the Sphinx config for one version. :raise HandledError: If sphinx-build fails. Will be logged before raising. :param str source: Source directory to pass to sphinx-build. :param str current_name: The ref name of the current version being built. :return: Specific Sphinx config values. :rtype: dict
entailment
def builder_inited(app): """Update the Sphinx builder. :param sphinx.application.Sphinx app: Sphinx application object. """ # Add this extension's _templates directory to Sphinx. templates_dir = os.path.join(os.path.dirname(__file__), '_templates') app.builder.templates.pathchain.insert(0, templates_dir) app.builder.templates.loaders.insert(0, SphinxFileSystemLoader(templates_dir)) app.builder.templates.templatepathlen += 1 # Add versions.html to sidebar. if '**' not in app.config.html_sidebars: app.config.html_sidebars['**'] = StandaloneHTMLBuilder.default_sidebars + ['versions.html'] elif 'versions.html' not in app.config.html_sidebars['**']: app.config.html_sidebars['**'].append('versions.html')
Update the Sphinx builder. :param sphinx.application.Sphinx app: Sphinx application object.
entailment
def env_updated(cls, app, env): """Abort Sphinx after initializing config and discovering all pages to build. :param sphinx.application.Sphinx app: Sphinx application object. :param sphinx.environment.BuildEnvironment env: Sphinx build environment. """ if cls.ABORT_AFTER_READ: config = {n: getattr(app.config, n) for n in (a for a in dir(app.config) if a.startswith('scv_'))} config['found_docs'] = tuple(str(d) for d in env.found_docs) config['master_doc'] = str(app.config.master_doc) cls.ABORT_AFTER_READ.put(config) sys.exit(0)
Abort Sphinx after initializing config and discovering all pages to build. :param sphinx.application.Sphinx app: Sphinx application object. :param sphinx.environment.BuildEnvironment env: Sphinx build environment.
entailment
def html_page_context(cls, app, pagename, templatename, context, doctree): """Update the Jinja2 HTML context, exposes the Versions class instance to it. :param sphinx.application.Sphinx app: Sphinx application object. :param str pagename: Name of the page being rendered (without .html or any file extension). :param str templatename: Page name with .html. :param dict context: Jinja2 HTML context. :param docutils.nodes.document doctree: Tree of docutils nodes. """ assert templatename or doctree # Unused, for linting. cls.VERSIONS.context = context versions = cls.VERSIONS this_remote = versions[cls.CURRENT_VERSION] banner_main_remote = versions[cls.BANNER_MAIN_VERSION] if cls.SHOW_BANNER else None # Update Jinja2 context. context['bitbucket_version'] = cls.CURRENT_VERSION context['current_version'] = cls.CURRENT_VERSION context['github_version'] = cls.CURRENT_VERSION context['html_theme'] = app.config.html_theme context['scv_banner_greatest_tag'] = cls.BANNER_GREATEST_TAG context['scv_banner_main_ref_is_branch'] = banner_main_remote['kind'] == 'heads' if cls.SHOW_BANNER else None context['scv_banner_main_ref_is_tag'] = banner_main_remote['kind'] == 'tags' if cls.SHOW_BANNER else None context['scv_banner_main_version'] = banner_main_remote['name'] if cls.SHOW_BANNER else None context['scv_banner_recent_tag'] = cls.BANNER_RECENT_TAG context['scv_is_branch'] = this_remote['kind'] == 'heads' context['scv_is_greatest_tag'] = this_remote == versions.greatest_tag_remote context['scv_is_recent_branch'] = this_remote == versions.recent_branch_remote context['scv_is_recent_ref'] = this_remote == versions.recent_remote context['scv_is_recent_tag'] = this_remote == versions.recent_tag_remote context['scv_is_root'] = cls.IS_ROOT context['scv_is_tag'] = this_remote['kind'] == 'tags' context['scv_show_banner'] = cls.SHOW_BANNER context['versions'] = versions context['vhasdoc'] = versions.vhasdoc context['vpathto'] = versions.vpathto # Insert banner into body. if cls.SHOW_BANNER and 'body' in context: parsed = app.builder.templates.render('banner.html', context) context['body'] = parsed + context['body'] # Handle overridden css_files. css_files = context.setdefault('css_files', list()) if '_static/banner.css' not in css_files: css_files.append('_static/banner.css') # Handle overridden html_static_path. if STATIC_DIR not in app.config.html_static_path: app.config.html_static_path.append(STATIC_DIR) # Reset last_updated with file's mtime (will be last git commit authored date). if app.config.html_last_updated_fmt is not None: file_path = app.env.doc2path(pagename) if os.path.isfile(file_path): lufmt = app.config.html_last_updated_fmt or getattr(locale, '_')('%b %d, %Y') mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file_path)) context['last_updated'] = format_date(lufmt, mtime, language=app.config.language, warn=app.warn)
Update the Jinja2 HTML context, exposes the Versions class instance to it. :param sphinx.application.Sphinx app: Sphinx application object. :param str pagename: Name of the page being rendered (without .html or any file extension). :param str templatename: Page name with .html. :param dict context: Jinja2 HTML context. :param docutils.nodes.document doctree: Tree of docutils nodes.
entailment
def cli(config, **options): """Build versioned Sphinx docs for every branch and tag pushed to origin. Supports only building locally with the "build" sub command or build and push to a remote with the "push" sub command. For more information for either run them with their own --help. The options below are global and must be specified before the sub command name (e.g. -N build ...). \f :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param dict options: Additional Click options. """ def pre(rel_source): """To be executed in a Click sub command. Needed because if this code is in cli() it will be executed when the user runs: <command> <sub command> --help :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py. """ # Setup logging. if not NO_EXECUTE: setup_logging(verbose=config.verbose, colors=not config.no_colors) log = logging.getLogger(__name__) # Change current working directory. if config.chdir: os.chdir(config.chdir) log.debug('Working directory: %s', os.getcwd()) else: config.update(dict(chdir=os.getcwd()), overwrite=True) # Get and verify git root. try: config.update(dict(git_root=get_root(config.git_root or os.getcwd())), overwrite=True) except GitError as exc: log.error(exc.message) log.error(exc.output) raise HandledError # Look for local config. if config.no_local_conf: config.update(dict(local_conf=None), overwrite=True) elif not config.local_conf: candidates = [p for p in (os.path.join(s, 'conf.py') for s in rel_source) if os.path.isfile(p)] if candidates: config.update(dict(local_conf=candidates[0]), overwrite=True) else: log.debug("Didn't find a conf.py in any REL_SOURCE.") elif os.path.basename(config.local_conf) != 'conf.py': log.error('Path "%s" must end with conf.py.', config.local_conf) raise HandledError config['pre'] = pre # To be called by Click sub commands. config.update(options)
Build versioned Sphinx docs for every branch and tag pushed to origin. Supports only building locally with the "build" sub command or build and push to a remote with the "push" sub command. For more information for either run them with their own --help. The options below are global and must be specified before the sub command name (e.g. -N build ...). \f :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param dict options: Additional Click options.
entailment
def build_options(func): """Add "build" Click options to function. :param function func: The function to wrap. :return: The wrapped function. :rtype: function """ func = click.option('-a', '--banner-greatest-tag', is_flag=True, help='Override banner-main-ref to be the tag with the highest version number.')(func) func = click.option('-A', '--banner-recent-tag', is_flag=True, help='Override banner-main-ref to be the most recent committed tag.')(func) func = click.option('-b', '--show-banner', help='Show a warning banner.', is_flag=True)(func) func = click.option('-B', '--banner-main-ref', help="Don't show banner on this ref and point banner URLs to this ref. Default master.")(func) func = click.option('-i', '--invert', help='Invert/reverse order of versions.', is_flag=True)(func) func = click.option('-p', '--priority', type=click.Choice(('branches', 'tags')), help="Group these kinds of versions at the top (for themes that don't separate them).")(func) func = click.option('-r', '--root-ref', help='The branch/tag at the root of DESTINATION. Will also be in subdir. Default master.')(func) func = click.option('-s', '--sort', multiple=True, type=click.Choice(('semver', 'alpha', 'time')), help='Sort versions. Specify multiple times to sort equal values of one kind.')(func) func = click.option('-t', '--greatest-tag', is_flag=True, help='Override root-ref to be the tag with the highest version number.')(func) func = click.option('-T', '--recent-tag', is_flag=True, help='Override root-ref to be the most recent committed tag.')(func) func = click.option('-w', '--whitelist-branches', multiple=True, help='Whitelist branches that match the pattern. Can be specified more than once.')(func) func = click.option('-W', '--whitelist-tags', multiple=True, help='Whitelist tags that match the pattern. Can be specified more than once.')(func) return func
Add "build" Click options to function. :param function func: The function to wrap. :return: The wrapped function. :rtype: function
entailment
def override_root_main_ref(config, remotes, banner): """Override root_ref or banner_main_ref with tags in config if user requested. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param iter remotes: List of dicts from Versions.remotes. :param bool banner: Evaluate banner main ref instead of root ref. :return: If root/main ref exists. :rtype: bool """ log = logging.getLogger(__name__) greatest_tag = config.banner_greatest_tag if banner else config.greatest_tag recent_tag = config.banner_recent_tag if banner else config.recent_tag if greatest_tag or recent_tag: candidates = [r for r in remotes if r['kind'] == 'tags'] if candidates: multi_sort(candidates, ['semver' if greatest_tag else 'time']) config.update({'banner_main_ref' if banner else 'root_ref': candidates[0]['name']}, overwrite=True) else: flag = '--banner-main-ref' if banner else '--root-ref' log.warning('No git tags with docs found in remote. Falling back to %s value.', flag) ref = config.banner_main_ref if banner else config.root_ref return ref in [r['name'] for r in remotes]
Override root_ref or banner_main_ref with tags in config if user requested. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param iter remotes: List of dicts from Versions.remotes. :param bool banner: Evaluate banner main ref instead of root ref. :return: If root/main ref exists. :rtype: bool
entailment
def build(config, rel_source, destination, **options): """Fetch branches/tags and build all locally. Doesn't push anything to remote. Just fetch all remote branches and tags, export them to a temporary directory, run sphinx-build on each one, and then store all built documentation in DESTINATION. REL_SOURCE is the path to the docs directory relative to the git root. If the source directory has moved around between git tags you can specify additional directories. DESTINATION is the path to the local directory that will hold all generated docs for all versions. To pass options to sphinx-build (run for every branch/tag) use a double hyphen (e.g. build docs docs/_build/html -- -D setting=value). \f :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py (e.g. docs). :param str destination: Destination directory to copy/overwrite built docs to. Does not delete old files. :param dict options: Additional Click options. """ if 'pre' in config: config.pop('pre')(rel_source) config.update({k: v for k, v in options.items() if v}) if config.local_conf: config.update(read_local_conf(config.local_conf), ignore_set=True) if NO_EXECUTE: raise RuntimeError(config, rel_source, destination) log = logging.getLogger(__name__) # Gather git data. log.info('Gathering info about the remote git repository...') conf_rel_paths = [os.path.join(s, 'conf.py') for s in rel_source] remotes = gather_git_info(config.git_root, conf_rel_paths, config.whitelist_branches, config.whitelist_tags) if not remotes: log.error('No docs found in any remote branch/tag. Nothing to do.') raise HandledError versions = Versions( remotes, sort=config.sort, priority=config.priority, invert=config.invert, ) # Get root ref. if not override_root_main_ref(config, versions.remotes, False): log.error('Root ref %s not found in: %s', config.root_ref, ' '.join(r[1] for r in remotes)) raise HandledError log.info('Root ref is: %s', config.root_ref) # Get banner main ref. if not config.show_banner: config.update(dict(banner_greatest_tag=False, banner_main_ref=None, banner_recent_tag=False), overwrite=True) elif not override_root_main_ref(config, versions.remotes, True): log.warning('Banner main ref %s not found in: %s', config.banner_main_ref, ' '.join(r[1] for r in remotes)) log.warning('Disabling banner.') config.update(dict(banner_greatest_tag=False, banner_main_ref=None, banner_recent_tag=False, show_banner=False), overwrite=True) else: log.info('Banner main ref is: %s', config.banner_main_ref) # Pre-build. log.info("Pre-running Sphinx to collect versions' master_doc and other info.") exported_root = pre_build(config.git_root, versions) if config.banner_main_ref and config.banner_main_ref not in [r['name'] for r in versions.remotes]: log.warning('Banner main ref %s failed during pre-run. Disabling banner.', config.banner_main_ref) config.update(dict(banner_greatest_tag=False, banner_main_ref=None, banner_recent_tag=False, show_banner=False), overwrite=True) # Build. build_all(exported_root, destination, versions) # Cleanup. log.debug('Removing: %s', exported_root) shutil.rmtree(exported_root) # Store versions in state for push(). config['versions'] = versions
Fetch branches/tags and build all locally. Doesn't push anything to remote. Just fetch all remote branches and tags, export them to a temporary directory, run sphinx-build on each one, and then store all built documentation in DESTINATION. REL_SOURCE is the path to the docs directory relative to the git root. If the source directory has moved around between git tags you can specify additional directories. DESTINATION is the path to the local directory that will hold all generated docs for all versions. To pass options to sphinx-build (run for every branch/tag) use a double hyphen (e.g. build docs docs/_build/html -- -D setting=value). \f :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py (e.g. docs). :param str destination: Destination directory to copy/overwrite built docs to. Does not delete old files. :param dict options: Additional Click options.
entailment
def push(ctx, config, rel_source, dest_branch, rel_dest, **options): """Build locally and then push to remote branch. First the build sub command is invoked which takes care of building all versions of your documentation in a temporary directory. If that succeeds then all built documents will be pushed to a remote branch. REL_SOURCE is the path to the docs directory relative to the git root. If the source directory has moved around between git tags you can specify additional directories. DEST_BRANCH is the branch name where generated docs will be committed to. The branch will then be pushed to remote. If there is a race condition with another job pushing to remote the docs will be re-generated and pushed again. REL_DEST is the path to the directory that will hold all generated docs for all versions relative to the git roof of DEST_BRANCH. To pass options to sphinx-build (run for every branch/tag) use a double hyphen (e.g. push docs gh-pages . -- -D setting=value). \f :param click.core.Context ctx: Click context. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py (e.g. docs). :param str dest_branch: Branch to clone and push to. :param str rel_dest: Relative path (to git root) to write generated docs to. :param dict options: Additional Click options. """ if 'pre' in config: config.pop('pre')(rel_source) config.update({k: v for k, v in options.items() if v}) if config.local_conf: config.update(read_local_conf(config.local_conf), ignore_set=True) if NO_EXECUTE: raise RuntimeError(config, rel_source, dest_branch, rel_dest) log = logging.getLogger(__name__) # Clone, build, push. for _ in range(PUSH_RETRIES): with TempDir() as temp_dir: log.info('Cloning %s into temporary directory...', dest_branch) try: clone(config.git_root, temp_dir, config.push_remote, dest_branch, rel_dest, config.grm_exclude) except GitError as exc: log.error(exc.message) log.error(exc.output) raise HandledError log.info('Building docs...') ctx.invoke(build, rel_source=rel_source, destination=os.path.join(temp_dir, rel_dest)) versions = config.pop('versions') log.info('Attempting to push to branch %s on remote repository.', dest_branch) try: if commit_and_push(temp_dir, config.push_remote, versions): return except GitError as exc: log.error(exc.message) log.error(exc.output) raise HandledError log.warning('Failed to push to remote repository. Retrying in %d seconds...', PUSH_SLEEP) time.sleep(PUSH_SLEEP) # Failed if this is reached. log.error('Ran out of retries, giving up.') raise HandledError
Build locally and then push to remote branch. First the build sub command is invoked which takes care of building all versions of your documentation in a temporary directory. If that succeeds then all built documents will be pushed to a remote branch. REL_SOURCE is the path to the docs directory relative to the git root. If the source directory has moved around between git tags you can specify additional directories. DEST_BRANCH is the branch name where generated docs will be committed to. The branch will then be pushed to remote. If there is a race condition with another job pushing to remote the docs will be re-generated and pushed again. REL_DEST is the path to the directory that will hold all generated docs for all versions relative to the git roof of DEST_BRANCH. To pass options to sphinx-build (run for every branch/tag) use a double hyphen (e.g. push docs gh-pages . -- -D setting=value). \f :param click.core.Context ctx: Click context. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py (e.g. docs). :param str dest_branch: Branch to clone and push to. :param str rel_dest: Relative path (to git root) to write generated docs to. :param dict options: Additional Click options.
entailment
def custom_sort(param): """Custom Click(Command|Group).params sorter. Case insensitive sort with capitals after lowercase. --version at the end since I can't sort --help. :param click.core.Option param: Parameter to evaluate. :return: Sort weight. :rtype: int """ option = param.opts[0].lstrip('-') if param.param_type_name != 'option': return False, return True, option == 'version', option.lower(), option.swapcase()
Custom Click(Command|Group).params sorter. Case insensitive sort with capitals after lowercase. --version at the end since I can't sort --help. :param click.core.Option param: Parameter to evaluate. :return: Sort weight. :rtype: int
entailment
def get_params(self, ctx): """Sort order of options before displaying. :param click.core.Context ctx: Click context. :return: super() return value. """ self.params.sort(key=self.custom_sort) return super(ClickGroup, self).get_params(ctx)
Sort order of options before displaying. :param click.core.Context ctx: Click context. :return: super() return value.
entailment
def main(self, *args, **kwargs): """Main function called by setuptools. :param list args: Passed to super(). :param dict kwargs: Passed to super(). :return: super() return value. """ argv = kwargs.pop('args', click.get_os_args()) if '--' in argv: pos = argv.index('--') argv, self.overflow = argv[:pos], tuple(argv[pos + 1:]) else: argv, self.overflow = argv, tuple() return super(ClickGroup, self).main(args=argv, *args, **kwargs)
Main function called by setuptools. :param list args: Passed to super(). :param dict kwargs: Passed to super(). :return: super() return value.
entailment
def invoke(self, ctx): """Inject overflow arguments into context state. :param click.core.Context ctx: Click context. :return: super() return value. """ if self.overflow: ctx.ensure_object(Config).update(dict(overflow=self.overflow)) return super(ClickGroup, self).invoke(ctx)
Inject overflow arguments into context state. :param click.core.Context ctx: Click context. :return: super() return value.
entailment
def from_context(cls): """Retrieve this class' instance from the current Click context. :return: Instance of this class. :rtype: Config """ try: ctx = click.get_current_context() except RuntimeError: return cls() return ctx.find_object(cls)
Retrieve this class' instance from the current Click context. :return: Instance of this class. :rtype: Config
entailment
def update(self, params, ignore_set=False, overwrite=False): """Set instance values from dictionary. :param dict params: Click context params. :param bool ignore_set: Skip already-set values instead of raising AttributeError. :param bool overwrite: Allow overwriting already-set values. """ log = logging.getLogger(__name__) valid = {i[0] for i in self} for key, value in params.items(): if not hasattr(self, key): raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, key)) if key not in valid: message = "'{}' object does not support item assignment on '{}'" raise AttributeError(message.format(self.__class__.__name__, key)) if key in self._already_set: if ignore_set: log.debug('%s already set in config, skipping.', key) continue if not overwrite: message = "'{}' object does not support item re-assignment on '{}'" raise AttributeError(message.format(self.__class__.__name__, key)) setattr(self, key, value) self._already_set.add(key)
Set instance values from dictionary. :param dict params: Click context params. :param bool ignore_set: Skip already-set values instead of raising AttributeError. :param bool overwrite: Allow overwriting already-set values.
entailment
def cleanup(self): """Recursively delete directory.""" shutil.rmtree(self.name, onerror=lambda *a: os.chmod(a[1], __import__('stat').S_IWRITE) or os.unlink(a[1])) if os.path.exists(self.name): raise IOError(17, "File exists: '{}'".format(self.name))
Recursively delete directory.
entailment
def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name=ciscohostingdevicemanager.DEVICE, collection_name=ciscohostingdevicemanager.DEVICES) controller = resource.Resource( RouterHostingDeviceSchedulerController(), cb_faults.FAULT_MAP) exts.append(extensions.ResourceExtension( DEVICE_L3_ROUTERS, controller, parent, path_prefix="/dev_mgr")) parent = dict(member_name="router", collection_name=l3_const.ROUTERS) controller = resource.Resource( HostingDevicesHostingRouterController(), cb_faults.FAULT_MAP) exts.append(extensions.ResourceExtension(L3_ROUTER_DEVICES, controller, parent)) return exts
Returns Ext Resources.
entailment
def assign_hosting_device_to_cfg_agent(self, context, cfg_agent_id, hosting_device_id): """Make config agent handle an (unassigned) hosting device.""" hd_db = self._get_hosting_device(context, hosting_device_id) if hd_db.cfg_agent_id: if hd_db.cfg_agent_id == cfg_agent_id: return LOG.debug('Hosting device %(hd_id)s has already been assigned to ' 'Cisco cfg agent %(agent_id)s', {'hd_id': hosting_device_id, 'agent_id': cfg_agent_id}) raise ciscocfgagentscheduler.HostingDeviceAssignedToCfgAgent( hosting_device_id=hosting_device_id, agent_id=cfg_agent_id) cfg_agent_db = get_agent_db_obj(self._get_agent(context, cfg_agent_id)) if (cfg_agent_db.agent_type != c_constants.AGENT_TYPE_CFG or cfg_agent_db.admin_state_up is not True): raise ciscocfgagentscheduler.InvalidCfgAgent(agent_id=cfg_agent_id) self._bind_hosting_device_to_cfg_agent(context, hd_db, cfg_agent_db) cfg_notifier = self.agent_notifiers.get(c_constants.AGENT_TYPE_CFG) if cfg_notifier: cfg_notifier.hosting_devices_assigned_to_cfg_agent( context, [hosting_device_id], cfg_agent_db.host)
Make config agent handle an (unassigned) hosting device.
entailment
def unassign_hosting_device_from_cfg_agent(self, context, cfg_agent_id, hosting_device_id): """Make config agent handle an (unassigned) hosting device.""" hd_db = self._get_hosting_device(context, hosting_device_id) if hd_db.cfg_agent_id is None and cfg_agent_id is None: return elif hd_db.cfg_agent_id != cfg_agent_id: LOG.debug('Hosting device %(hd_id)s is not assigned to Cisco ' 'cfg agent %(agent_id)s', {'hd_id': hosting_device_id, 'agent_id': cfg_agent_id}) raise ciscocfgagentscheduler.HostingDeviceNotAssignedToCfgAgent( hosting_device_id=hosting_device_id, agent_id=cfg_agent_id) cfg_agent_db = get_agent_db_obj(self._get_agent(context, cfg_agent_id)) cfg_notifier = self.agent_notifiers.get(c_constants.AGENT_TYPE_CFG) if cfg_notifier: cfg_notifier.hosting_devices_unassigned_from_cfg_agent( context, [hosting_device_id], cfg_agent_db.host) self._bind_hosting_device_to_cfg_agent(context, hd_db, None)
Make config agent handle an (unassigned) hosting device.
entailment
def update_subports(self, port): """Set port attributes for trunk subports. For baremetal deployments only, set the neutron port attributes during the bind_port event. """ trunk_details = port.get('trunk_details') subports = trunk_details['sub_ports'] host_id = port.get(bc.dns.DNSNAME) context = bc.get_context() el_context = context.elevated() for subport in subports: bc.get_plugin().update_port(el_context, subport['port_id'], {'port': {bc.portbindings.HOST_ID: host_id, 'device_owner': bc.trunk_consts.TRUNK_SUBPORT_OWNER}}) # Set trunk to ACTIVE status. trunk_obj = bc.trunk_objects.Trunk.get_object( el_context, id=trunk_details['trunk_id']) trunk_obj.update(status=bc.trunk_consts.ACTIVE_STATUS)
Set port attributes for trunk subports. For baremetal deployments only, set the neutron port attributes during the bind_port event.
entailment
def read_static_uplink(self): """Read the static uplink from file, if given.""" if self.node_list is None or self.node_uplink_list is None: return for node, port in zip(self.node_list.split(','), self.node_uplink_list.split(',')): if node.strip() == self.host_name: self.static_uplink = True self.static_uplink_port = port.strip() return
Read the static uplink from file, if given.
entailment
def vdp_vlan_change_cb(self, port_uuid, lvid, vdp_vlan, fail_reason): """Callback function for updating the VDP VLAN in DB. """ LOG.info("Vlan change CB lvid %(lvid)s VDP %(vdp)s", {'lvid': lvid, 'vdp': vdp_vlan}) self.update_vm_result(port_uuid, constants.RESULT_SUCCESS, lvid=lvid, vdp_vlan=vdp_vlan, fail_reason=fail_reason)
Callback function for updating the VDP VLAN in DB.
entailment
def process_bulk_vm_event(self, msg, phy_uplink): """Process the VM bulk event usually after a restart. """ LOG.info("In processing Bulk VM Event status %s", msg) time.sleep(3) if (not self.uplink_det_compl or phy_uplink not in self.ovs_vdp_obj_dict): # This condition shouldn't be hit as only when uplink is obtained, # save_uplink is called and that in turns calls this process_bulk. LOG.error("Uplink Port Event not received," "yet in bulk process") return ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink] for vm_dict in msg.msg_dict.get('vm_bulk_list'): if vm_dict['status'] == 'down': ovs_vdp_obj.pop_local_cache(vm_dict['port_uuid'], vm_dict['vm_mac'], vm_dict['net_uuid'], vm_dict['local_vlan'], vm_dict['vdp_vlan'], vm_dict['segmentation_id']) vm_msg = VdpQueMsg(constants.VM_MSG_TYPE, port_uuid=vm_dict['port_uuid'], vm_mac=vm_dict['vm_mac'], net_uuid=vm_dict['net_uuid'], segmentation_id=vm_dict['segmentation_id'], status=vm_dict['status'], oui=vm_dict['oui'], phy_uplink=phy_uplink) self.process_vm_event(vm_msg, phy_uplink)
Process the VM bulk event usually after a restart.
entailment
def is_openstack_running(self): """Currently it just checks for the presence of both the bridges. """ try: if (ovs_vdp.is_bridge_present(self.br_ex, self.root_helper) and ovs_vdp.is_bridge_present(self.br_integ, self.root_helper)): return True else: return False except Exception as e: LOG.error("Exception in is_openstack_running %s", str(e)) return False
Currently it just checks for the presence of both the bridges.
entailment
def _fill_topology_cfg(self, topo_dict): """Fills the extra configurations in the topology. """ cfg_dict = {} if topo_dict.bond_member_ports is not None: cfg_dict.update({'bond_member_ports': topo_dict.bond_member_ports}) if topo_dict.bond_interface is not None: cfg_dict.update({'bond_interface': topo_dict.bond_interface}) return cfg_dict
Fills the extra configurations in the topology.
entailment
def uplink_bond_intf_process(self): """Process the case when uplink interface becomes part of a bond. This is called to check if the phy interface became a part of the bond. If the below condition is True, this means, a physical interface that was not a part of a bond was earlier discovered as uplink and now that interface became part of the bond. Usually, this doesn't happen as LLDP and in turn this function will first detect a 'down' followed by an 'up'. When regular interface becomes part of bond, it's rare for it to hit this 'normal' case. But, still providing the functionality if it happens. The following is done : a. Bring down the physical interface by sending a 'down' event b. Add the bond interface by sending an 'up' event Consquently, when bond is added that will be assigned to self.phy_uplink. Then, the below condition will be False. i.e.. 'get_bond_intf' will return False, when the argument is 'bond0'. """ bond_intf = sys_utils.get_bond_intf(self.phy_uplink) if bond_intf is None: return False self.save_uplink( fail_reason=constants.port_transition_bond_down_reason) self.process_uplink_ongoing = True upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='down', phy_uplink=self.phy_uplink, br_int=self.br_integ, br_ex=self.br_ex, root_helper=self.root_helper) self.que.enqueue(constants.Q_UPL_PRIO, upl_msg) self.phy_uplink = None self.veth_intf = None self.uplink_det_compl = False # No veth interface self.save_uplink( uplink=bond_intf, fail_reason=constants.port_transition_bond_up_reason) self.phy_uplink = bond_intf self.process_uplink_ongoing = True upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='up', phy_uplink=self.phy_uplink, br_int=self.br_integ, br_ex=self.br_ex, root_helper=self.root_helper) self.que.enqueue(constants.Q_UPL_PRIO, upl_msg) return True
Process the case when uplink interface becomes part of a bond. This is called to check if the phy interface became a part of the bond. If the below condition is True, this means, a physical interface that was not a part of a bond was earlier discovered as uplink and now that interface became part of the bond. Usually, this doesn't happen as LLDP and in turn this function will first detect a 'down' followed by an 'up'. When regular interface becomes part of bond, it's rare for it to hit this 'normal' case. But, still providing the functionality if it happens. The following is done : a. Bring down the physical interface by sending a 'down' event b. Add the bond interface by sending an 'up' event Consquently, when bond is added that will be assigned to self.phy_uplink. Then, the below condition will be False. i.e.. 'get_bond_intf' will return False, when the argument is 'bond0'.
entailment
def check_periodic_bulk_vm_notif_rcvd(self): """Bulk VM check handler called from periodic uplink detection. This gets called by the 'normal' stage of uplink detection. The bulk VM event sends all the VM's running in this agent. Sometimes during upgrades, it was found that due to some race condition, the server does not send the Bulk VM event. Whenever, a save_uplink is done by the agent, the server sends the Bulk VM event. If Bulk VM event is not received after few attempts, save_uplink is done to request the Bulk VM list. It's not protected with a mutex, since worst case, Bulk VM event will be sent twice, which is not that bad. When uplink is detected for the first time, it will hit the below else case and there a save_uplink is anyways done. """ if not self.bulk_vm_rcvd_flag: if self.bulk_vm_check_cnt >= 1: self.bulk_vm_check_cnt = 0 self.save_uplink(uplink=self.phy_uplink, veth_intf=self.veth_intf) LOG.info("Doing save_uplink again to request " "Bulk VM's") else: LOG.info("Bulk VM not received, incrementing count") self.bulk_vm_check_cnt += 1
Bulk VM check handler called from periodic uplink detection. This gets called by the 'normal' stage of uplink detection. The bulk VM event sends all the VM's running in this agent. Sometimes during upgrades, it was found that due to some race condition, the server does not send the Bulk VM event. Whenever, a save_uplink is done by the agent, the server sends the Bulk VM event. If Bulk VM event is not received after few attempts, save_uplink is done to request the Bulk VM list. It's not protected with a mutex, since worst case, Bulk VM event will be sent twice, which is not that bad. When uplink is detected for the first time, it will hit the below else case and there a save_uplink is anyways done.
entailment
def static_uplink_detect(self, veth): """Return the static uplink based on argument passed. The very first time, this function is called, it returns the uplink port read from a file. After restart, when this function is called the first time, it returns 'normal' assuming a veth is passed to this function which will be the case if uplink processing is successfully done. If user modified the uplink configuration and restarted, a 'down' will be returned to clear the old uplink. """ LOG.info("In static_uplink_detect %(veth)s", {'veth': veth}) if self.static_uplink_first: self.static_uplink_first = False if self.phy_uplink is not None and ( self.phy_uplink != self.static_uplink_port): return 'down' if veth is None: return self.static_uplink_port else: return 'normal'
Return the static uplink based on argument passed. The very first time, this function is called, it returns the uplink port read from a file. After restart, when this function is called the first time, it returns 'normal' assuming a veth is passed to this function which will be the case if uplink processing is successfully done. If user modified the uplink configuration and restarted, a 'down' will be returned to clear the old uplink.
entailment
def vdp_uplink_proc(self): """Periodic handler to detect the uplink interface to the switch. -> restart_uplink_called: should be called by agent initially to set the stored uplink and veth from DB -> process_uplink_ongoing: Will be set when uplink message is enqueue and reset when dequeued and processed completely -> uplink_det_compl: Will be set to True when a valid uplink is detected and object created. Will be reset when uplink is down -> phy_uplink: Is the uplink interface -> veth_intf : Signifies the veth interface. """ LOG.info("In Periodic Uplink Task") if not self.is_os_run: if not self.is_openstack_running(): LOG.info("OpenStack is not running") return else: self.is_os_run = True if not self.restart_uplink_called or self.process_uplink_ongoing: LOG.info("Uplink before restart not refreshed yet..states " "%(ruc)d %(puo)d", {'ruc': self.restart_uplink_called, 'puo': self.process_uplink_ongoing}) return if self.phy_uplink is not None: if (self.uplink_det_compl and ( self.phy_uplink not in self.ovs_vdp_obj_dict)): LOG.error("Not Initialized for phy %s", self.phy_uplink) return if self.phy_uplink in self.ovs_vdp_obj_dict: self.veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink]. get_lldp_local_bridge_port()) # The below logic has a bug when agent is started # and openstack is not running fixme(padkrish) else: if self.veth_intf is None: LOG.error("Incorrect state, Bug") return if self.static_uplink: ret = self.static_uplink_detect(self.veth_intf) else: ret = uplink_det.detect_uplink(self.veth_intf) if ret is 'down': if self.phy_uplink is None: LOG.error("Wrong status down") return # Call API to set the uplink as "" DOWN event self.uplink_down_cnt = self.uplink_down_cnt + 1 if not self.static_uplink and ( self.uplink_down_cnt < constants.UPLINK_DOWN_THRES): return self.process_uplink_ongoing = True upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='down', phy_uplink=self.phy_uplink, br_int=self.br_integ, br_ex=self.br_ex, root_helper=self.root_helper) self.que.enqueue(constants.Q_UPL_PRIO, upl_msg) self.phy_uplink = None self.veth_intf = None self.uplink_det_compl = False self.uplink_down_cnt = 0 elif ret is None: if self.veth_intf is not None: LOG.error("Wrong status None") return # Call API to set the uplink as "" Uplink not discovered yet self.save_uplink(fail_reason=constants.uplink_undiscovered_reason) elif ret is 'normal': if self.veth_intf is None: LOG.error("Wrong status Normal") return # Uplink already discovered, nothing to be done here # Resetting it back, happens when uplink was down for a very short # time and no need to remove flows self.uplink_down_cnt = 0 bond_det = self.uplink_bond_intf_process() # Revisit this logic. # If uplink detection fails, it will be put in Error queue, which # will dequeue and put it back in the main queue # At the same time this periodic task will also hit this normal # state and will put the message in main queue. fixme(padkrish) # The below lines are put here because after restart when # eth/veth are passed to uplink script, it will return normal # But OVS object would not have been created for the first time, # so the below lines ensures it's done. if not self.uplink_det_compl and not bond_det: if self.phy_uplink is None: LOG.error("Incorrect state, bug") return self.process_uplink_ongoing = True upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='up', phy_uplink=self.phy_uplink, br_int=self.br_integ, br_ex=self.br_ex, root_helper=self.root_helper) self.que.enqueue(constants.Q_UPL_PRIO, upl_msg) # yield LOG.info("Enqueued Uplink Msg from normal") self.check_periodic_bulk_vm_notif_rcvd() else: LOG.info("In Periodic Uplink Task uplink found %s", ret) bond_intf = sys_utils.get_bond_intf(ret) if bond_intf is not None: ret = bond_intf LOG.info("Interface %(memb)s part of bond %(bond)s" % {'memb': ret, 'bond': bond_intf}) # Call API to set the uplink as ret self.save_uplink(uplink=ret, veth_intf=self.veth_intf) self.phy_uplink = ret self.process_uplink_ongoing = True upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='up', phy_uplink=self.phy_uplink, br_int=self.br_integ, br_ex=self.br_ex, root_helper=self.root_helper) self.que.enqueue(constants.Q_UPL_PRIO, upl_msg) # yield LOG.info("Enqueued Uplink Msg")
Periodic handler to detect the uplink interface to the switch. -> restart_uplink_called: should be called by agent initially to set the stored uplink and veth from DB -> process_uplink_ongoing: Will be set when uplink message is enqueue and reset when dequeued and processed completely -> uplink_det_compl: Will be set to True when a valid uplink is detected and object created. Will be reset when uplink is down -> phy_uplink: Is the uplink interface -> veth_intf : Signifies the veth interface.
entailment
def get_hosting_device_config(self, client, hosting_device_id): """Get config of hosting_device.""" return client.get((self.resource_path + HOSTING_DEVICE_CONFIG) % hosting_device_id)
Get config of hosting_device.
entailment
def get_client_class(self, client_class_name): """Returns a specific client class details from CPNR server.""" request_url = self._build_url(['ClientClass', client_class_name]) return self._do_request('GET', request_url)
Returns a specific client class details from CPNR server.
entailment
def get_vpn(self, vpn_name): """Returns a specific VPN name details from CPNR server.""" request_url = self._build_url(['VPN', vpn_name]) return self._do_request('GET', request_url)
Returns a specific VPN name details from CPNR server.
entailment
def get_scopes(self, vpnid='.*'): """Returns a list of all the scopes from CPNR server.""" request_url = self._build_url(['Scope'], vpn=vpnid) return self._do_request('GET', request_url)
Returns a list of all the scopes from CPNR server.
entailment
def get_scope(self, scope_name): """Returns a specific scope name details from CPNR server.""" request_url = self._build_url(['Scope', scope_name]) return self._do_request('GET', request_url)
Returns a specific scope name details from CPNR server.
entailment
def get_client_entry(self, client_entry_name): """Returns a specific client entry name details from CPNR server.""" request_url = self._build_url(['ClientEntry', client_entry_name]) return self._do_request('GET', request_url)
Returns a specific client entry name details from CPNR server.
entailment
def release_address(self, address, vpnid): """Release a specific lease, called after delete_client_entry""" query = address + "?action=releaseAddress&vpnId=" + vpnid request_url = self._build_url(['Lease', query]) return self._do_request('DELETE', request_url)
Release a specific lease, called after delete_client_entry
entailment
def qsize(self, qname): """Return the approximate size of the queue.""" if qname in self._queues: return self._queues[qname].qsize() else: raise ValueError(_("queue %s is not defined"), qname)
Return the approximate size of the queue.
entailment
def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): """Lists a nexusport binding.""" LOG.debug("get_nexusport_binding() called") return _lookup_all_nexus_bindings(port_id=port_id, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id)
Lists a nexusport binding.
entailment
def get_nexus_switchport_binding(port_id, switch_ip): """Lists all bindings for this switch & port.""" LOG.debug("get_nexus_switchport_binding() called") return _lookup_all_nexus_bindings(port_id=port_id, switch_ip=switch_ip)
Lists all bindings for this switch & port.
entailment
def get_nexusvlan_binding(vlan_id, switch_ip): """Lists a vlan and switch binding.""" LOG.debug("get_nexusvlan_binding() called") return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip)
Lists a vlan and switch binding.
entailment
def get_reserved_bindings(vlan_id, instance_id, switch_ip=None, port_id=None): """Lists reserved bindings.""" LOG.debug("get_reserved_bindings() called") if port_id: return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id, port_id=port_id) elif switch_ip: return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id) else: return _lookup_all_nexus_bindings(vlan_id=vlan_id, instance_id=instance_id)
Lists reserved bindings.
entailment
def update_reserved_binding(vlan_id, switch_ip, instance_id, port_id, is_switch_binding=True, is_native=False, ch_grp=0): """Updates reserved binding. This overloads port bindings to support reserved Switch binding used to maintain the state of a switch so it can be viewed by all other neutron processes. There's also the case of a reserved port binding to keep switch information on a given interface. The values of these arguments is as follows: :param vlan_id: 0 :param switch_ip: ip address of the switch :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE : port-expected port_id :param ch_grp: 0 if no port-channel else non-zero integer """ if not port_id: LOG.warning("update_reserved_binding called with no state") return LOG.debug("update_reserved_binding called") session = bc.get_writer_session() if is_switch_binding: # For reserved switch binding binding = _lookup_one_nexus_binding(session=session, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id) binding.port_id = port_id else: # For reserved port binding binding = _lookup_one_nexus_binding(session=session, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id, port_id=port_id) binding.is_native = is_native binding.channel_group = ch_grp session.merge(binding) session.flush() return binding
Updates reserved binding. This overloads port bindings to support reserved Switch binding used to maintain the state of a switch so it can be viewed by all other neutron processes. There's also the case of a reserved port binding to keep switch information on a given interface. The values of these arguments is as follows: :param vlan_id: 0 :param switch_ip: ip address of the switch :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE : port-expected port_id :param ch_grp: 0 if no port-channel else non-zero integer
entailment
def remove_reserved_binding(vlan_id, switch_ip, instance_id, port_id): """Removes reserved binding. This overloads port bindings to support reserved Switch binding used to maintain the state of a switch so it can be viewed by all other neutron processes. There's also the case of a reserved port binding to keep switch information on a given interface. The values of these arguments is as follows: :param vlan_id: 0 :param switch_ip: ip address of the switch :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE : port-expected port_id """ if not port_id: LOG.warning("remove_reserved_binding called with no state") return LOG.debug("remove_reserved_binding called") session = bc.get_writer_session() binding = _lookup_one_nexus_binding(session=session, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id, port_id=port_id) for bind in binding: session.delete(bind) session.flush() return binding
Removes reserved binding. This overloads port bindings to support reserved Switch binding used to maintain the state of a switch so it can be viewed by all other neutron processes. There's also the case of a reserved port binding to keep switch information on a given interface. The values of these arguments is as follows: :param vlan_id: 0 :param switch_ip: ip address of the switch :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE : port-expected port_id
entailment
def add_reserved_switch_binding(switch_ip, state): """Add a reserved switch binding.""" # overload port_id to contain switch state add_nexusport_binding( state, const.NO_VLAN_OR_VNI_ID, const.NO_VLAN_OR_VNI_ID, switch_ip, const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1)
Add a reserved switch binding.
entailment
def update_reserved_switch_binding(switch_ip, state): """Update a reserved switch binding.""" # overload port_id to contain switch state update_reserved_binding( const.NO_VLAN_OR_VNI_ID, switch_ip, const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1, state)
Update a reserved switch binding.
entailment
def add_nexusport_binding(port_id, vlan_id, vni, switch_ip, instance_id, is_native=False, ch_grp=0): """Adds a nexusport binding.""" LOG.debug("add_nexusport_binding() called") session = bc.get_writer_session() binding = nexus_models_v2.NexusPortBinding(port_id=port_id, vlan_id=vlan_id, vni=vni, switch_ip=switch_ip, instance_id=instance_id, is_native=is_native, channel_group=ch_grp) session.add(binding) session.flush() return binding
Adds a nexusport binding.
entailment
def remove_nexusport_binding(port_id, vlan_id, vni, switch_ip, instance_id): """Removes a nexusport binding.""" LOG.debug("remove_nexusport_binding() called") session = bc.get_writer_session() binding = _lookup_all_nexus_bindings(session=session, vlan_id=vlan_id, vni=vni, switch_ip=switch_ip, port_id=port_id, instance_id=instance_id) for bind in binding: session.delete(bind) session.flush() return binding
Removes a nexusport binding.
entailment
def update_nexusport_binding(port_id, new_vlan_id): """Updates nexusport binding.""" if not new_vlan_id: LOG.warning("update_nexusport_binding called with no vlan") return LOG.debug("update_nexusport_binding called") session = bc.get_writer_session() binding = _lookup_one_nexus_binding(session=session, port_id=port_id) binding.vlan_id = new_vlan_id session.merge(binding) session.flush() return binding
Updates nexusport binding.
entailment
def remove_all_nexusport_bindings(): """Removes all nexusport bindings.""" LOG.debug("remove_all_nexusport_bindings() called") session = bc.get_writer_session() session.query(nexus_models_v2.NexusPortBinding).delete() session.flush()
Removes all nexusport bindings.
entailment
def get_nexusvm_bindings(vlan_id, instance_id): """Lists nexusvm bindings.""" LOG.debug("get_nexusvm_bindings() called") return _lookup_all_nexus_bindings(instance_id=instance_id, vlan_id=vlan_id)
Lists nexusvm bindings.
entailment
def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): """Lists nexusvm bindings.""" LOG.debug("get_port_vlan_switch_binding() called") return _lookup_all_nexus_bindings(port_id=port_id, switch_ip=switch_ip, vlan_id=vlan_id)
Lists nexusvm bindings.
entailment