sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def alphasnake(string):
    """Convert a string to snake_case, dropping non-alphanumeric characters.

    Word #word -> word_word.
    """
    if string:
        cleaned = [re.sub(r'\W+', '', token) for token in string.split()]
        string = decamel_to_snake(" ".join(cleaned))
    return string
Convert to snakecase removing non alpha numerics Word #word -> word_word.
entailment
def decamel(string):
    """Split CamelCased words by inserting spaces.

    CamelCase -> Camel Case, dromedaryCase -> dromedary Case.
    """
    # a capital letter that is NOT at a word boundary starts a new word
    return re.sub(r'(\B[A-Z][a-z]*)', r' \1', string)
Split CamelCased words. CamelCase -> Camel Case, dromedaryCase -> dromedary Case.
entailment
def decamel_to_snake(string):
    """Convert to lower case, join camel case with underscore.

    CamelCase -> camel_case. Camel Case -> camel_case.
    """
    parts = []
    for word in string.split():
        # all-upper words are just lowered; mixed case gets de-camelled
        parts.append(word.lower() if word.isupper() else decamel(word))
    return "_".join(snake(part) for part in parts)
Convert to lower case, join camel case with underscore. CamelCase -> camel_case. Camel Case -> camel_case.
entailment
def info_file(distro=None):
    """Return default distroinfo info file.

    :param distro: distro name; defaults to cfg['DISTRO']
    :raises exception.InvalidUsage: when no <DISTRO>INFO_FILE config
        option exists
    """
    if not distro:
        distro = cfg['DISTRO']
    # config option is e.g. RDOINFO_FILE for distro 'rdo'
    info_file_conf = distro.upper() + 'INFO_FILE'
    try:
        return cfg[info_file_conf]
    except KeyError:
        raise exception.InvalidUsage(
            why="Couldn't find config option %s for distro: %s" % (
                info_file_conf, distro))
Return default distroinfo info file
entailment
def get_distroinfo(distro=None):
    """Get DistroInfo initialized from configuration.

    :param distro: distro name; defaults to cfg['DISTRO']
    :raises exception.InvalidUsage: when neither an info git repo nor a
        raw info URL is configured for the distro
    """
    if not distro:
        distro = cfg['DISTRO']
    _info_file = info_file(distro)
    # prefer git fetcher if available
    git_info_url_conf = distro.upper() + 'INFO_REPO'
    try:
        remote_git_info = cfg[git_info_url_conf]
        return DistroInfo(_info_file, remote_git_info=remote_git_info)
    except KeyError:
        pass
    # try raw remote fetcher
    remote_info_url_conf = distro.upper() + 'INFO_RAW_URL'
    try:
        remote_info = cfg[remote_info_url_conf]
        return DistroInfo(_info_file, remote_info=remote_info)
    except KeyError:
        raise exception.InvalidUsage(
            why="Couldn't find config option %s or %s for distro: %s" % (
                git_info_url_conf, remote_info_url_conf, distro))
Get DistroInfo initialized from configuration
entailment
def split_filename(filename):
    """Receive a standard style rpm fullname and return
    name, version, release, epoch, arch.

    Example:
    foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
    1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64

    This function replaces rpmUtils.miscutils.splitFilename, see
    https://bugzilla.redhat.com/1452801
    """
    # Remove the .rpm suffix (slice instead of split so an embedded
    # '.rpm' inside the name can't truncate it)
    if filename.endswith('.rpm'):
        filename = filename[:-len('.rpm')]
    # is there an epoch?
    components = filename.split(':')
    if len(components) > 1:
        epoch = components[0]
        # BUGFIX: strip the epoch prefix so it doesn't leak into name
        filename = components[1]
    else:
        epoch = ''
    # Arch is the last item after .
    arch = filename.rsplit('.')[-1]
    remaining = filename.rsplit('.%s' % arch)[0]
    release = remaining.rsplit('-')[-1]
    version = remaining.rsplit('-')[-2]
    name = '-'.join(remaining.rsplit('-')[:-2])
    return name, version, release, epoch, arch
Received a standard style rpm fullname and returns name, version, release, epoch, arch Example: foo-1.0-1.i386.rpm returns foo, 1.0, 1, i386 1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64 This function replaces rpmUtils.miscutils.splitFilename, see https://bugzilla.redhat.com/1452801
entailment
def string_to_version(verstring):
    """Return a tuple of (epoch, version, release) from a version string.

    Epoch is the string before ':' when present, otherwise 0. Release is
    an empty string when the '-release' part is missing.

    This function replaces rpmUtils.miscutils.stringToVersion, see
    https://bugzilla.redhat.com/1364504
    """
    # is there an epoch?
    components = verstring.split(':')
    if len(components) > 1:
        epoch = components[0]
    else:
        epoch = 0
    # BUGFIX: take the part AFTER the epoch (components[-1]); the old
    # components[:2][0] returned the epoch itself when one was present
    remaining = components[-1].split('-')
    version = remaining[0]
    # tolerate a missing release instead of raising IndexError
    release = remaining[1] if len(remaining) > 1 else ''
    return (epoch, version, release)
Return a tuple of (epoch, version, release) from a version string This function replaces rpmUtils.miscutils.stringToVersion, see https://bugzilla.redhat.com/1364504
entailment
def spec_fn(spec_dir='.'):
    """Return the filename of the single .spec file in spec_dir.

    :param spec_dir: directory to search (default: current directory)
    :raises exception.SpecFileNotFound: when no .spec file is present
    :raises exception.MultipleSpecFilesFound: when more than one is found
    """
    # BUGFIX: isfile must be checked against the file's full path, not
    # the bare name (which was resolved relative to the cwd, not spec_dir)
    specs = [f for f in os.listdir(spec_dir)
             if os.path.isfile(os.path.join(spec_dir, f))
             and f.endswith('.spec')]
    if not specs:
        raise exception.SpecFileNotFound()
    if len(specs) != 1:
        raise exception.MultipleSpecFilesFound()
    return specs[0]
Return the filename for a .spec file in this directory.
entailment
def version_parts(version):
    """Split a version string into the numeric X.Y.Z part and the rest
    (milestone).

    :returns: (numeric_version, rest) tuple; rest is '' when the whole
        string is numeric, and the input is returned unchanged as the
        first element when it doesn't start with a number.
    """
    match = re.match(r'(\d+(?:\.\d+)*)([.%]|$)(.*)', version)
    if not match:
        return version, ''
    return match.group(1), match.group(2) + match.group(3)
Split a version string into numeric X.Y.Z part and the rest (milestone).
entailment
def release_parts(version):
    """Split RPM Release string into (numeric X.Y.Z part, milestone, rest).

    :returns: a three-element tuple (number, milestone, rest). If we
        cannot determine the "milestone" or "rest", those will be an
        empty string.
    """
    numver, tail = version_parts(version)
    if numver and not re.match(r'\d', numver):
        # entire release is macro a la %{release}
        tail = numver
        numver = ''
    # milestone is either a literal %{?milestone} macro or the first
    # dot-separated, macro-free chunk of the tail
    m = re.match(r'(\.?(?:%\{\?milestone\}|[^%.]+))(.*)$', tail)
    if m:
        milestone = m.group(1)
        rest = m.group(2)
    else:
        milestone = ''
        rest = tail
    return numver, milestone, rest
Split RPM Release string into (numeric X.Y.Z part, milestone, rest). :returns: a three-element tuple (number, milestone, rest). If we cannot determine the "milestone" or "rest", those will be an empty string.
entailment
def get_magic_comment(self, name, expand_macros=False):
    """Return a value of # name=value comment in spec or None.

    :param name: comment key to look for (regex-escaped before matching)
    :param expand_macros: when True and the value contains RPM macros,
        expand them before returning
    """
    match = re.search(r'^#\s*?%s\s?=\s?(\S+)' % re.escape(name),
                      self.txt, flags=re.M)
    if not match:
        return None
    val = match.group(1)
    if expand_macros and has_macros(val):
        # don't parse using rpm unless required
        val = self.expand_macro(val)
    return val
Return a value of # name=value comment in spec or None.
entailment
def get_patches_base(self, expand_macros=False):
    """Return a tuple (version, number_of_commits) that are parsed
    from the patches_base in the specfile.

    :param expand_macros: when True and the value contains RPM macros,
        expand them before parsing
    :returns: (ref, n_commits); (None, 0) when no patches_base is found
    """
    match = re.search(r'(?<=patches_base=)[\w.+?%{}]+', self.txt)
    if not match:
        return None, 0
    patches_base = match.group()
    if expand_macros and has_macros(patches_base):
        # don't parse using rpm unless required
        patches_base = self.expand_macro(patches_base)
    # "<ref>+<N>" encodes a base ref plus N commits on top of it
    patches_base_ref, _, n_commits = patches_base.partition('+')
    try:
        n_commits = int(n_commits)
    except ValueError:
        # missing or non-numeric commit count -> 0
        n_commits = 0
    return patches_base_ref, n_commits
Return a tuple (version, number_of_commits) that are parsed from the patches_base in the specfile.
entailment
def get_patches_ignore_regex(self):
    """Return a compiled regex for filtering out patches, or None.

    The pattern is parsed from a specfile comment of the form:

        # patches_ignore=(regex)

    Only a very limited subset of characters is accepted, so no fancy
    stuff like matching groups etc. Returns None when the comment is
    absent or the pattern does not compile.
    """
    match = re.search(r'# *patches_ignore=([\w *.+?[\]|{,}\-_]+)', self.txt)
    if not match:
        return None
    regex_string = match.group(1)
    try:
        return re.compile(regex_string)
    except Exception:
        # invalid pattern in the .spec -> behave as if no filter was set
        return None
Returns a string representing a regex for filtering out patches This string is parsed from a comment in the specfile that contains the word filter-out followed by an equal sign. For example, a comment as such: # patches_ignore=(regex) would mean this method returns the string '(regex)' Only a very limited subset of characters are accepted so no fancy stuff like matching groups etc.
entailment
def recognized_release(self):
    """
    Check if this Release value is something we can parse.

    :rtype: bool
    """
    _, _, rest = self.get_release_parts()
    # If "rest" is not a well-known value here, then this package is
    # using a Release value pattern we cannot recognize.
    if rest == '' or re.match(r'%{\??dist}', rest):
        return True
    return False
Check if this Release value is something we can parse. :rtype: bool
entailment
def get_vr(self, epoch=None):
    """get VR string from .spec Version, Release and Epoch

    epoch is None: prefix epoch if present (default)
    epoch is True: prefix epoch even if not present (0:)
    epoch is False: omit epoch even if present
    """
    version = self.get_tag('Version', expand_macros=True)
    e = None
    if epoch is None or epoch:
        try:
            e = self.get_tag('Epoch')
        except exception.SpecFileParseError:
            # no Epoch tag present in the .spec
            pass
    if epoch is None and e:
        # auto mode: include the epoch because the .spec defines one
        epoch = True
    if epoch:
        if not e:
            e = '0'
        version = '%s:%s' % (e, version)
    release = self.get_tag('Release')
    # strip a trailing %{?dist} macro before expanding the rest
    release = re.sub(r'%\{?\??dist\}?$', '', release)
    release = self.expand_macro(release)
    if release:
        return '%s-%s' % (version, release)
    return version
get VR string from .spec Version, Release and Epoch epoch is None: prefix epoch if present (default) epoch is True: prefix epoch even if not present (0:) epoch is False: omit epoch even if present
entailment
def get_nvr(self, epoch=None):
    """Return the NVR string (Name-Version-Release, optional Epoch)
    from the .spec."""
    return '%s-%s' % (self.get_tag('Name', expand_macros=True),
                      self.get_vr(epoch=epoch))
get NVR string from .spec Name, Version, Release and Epoch
entailment
def save(self):
    """Write the textual content (self._txt) to .spec file (self.fn).

    Does nothing when there is no text to write. Invalidates the cached
    rpm spec object after a successful write.

    :raises exception.InvalidAction: when no file name is set
    """
    if not self.txt:
        # no changes to write
        return
    if not self.fn:
        raise exception.InvalidAction(
            "Can't save .spec file without its file name specified.")
    # BUGFIX: use a context manager so the file handle is closed even
    # when write() raises
    with codecs.open(self.fn, 'w', encoding='utf-8') as f:
        f.write(self.txt)
    self._rpmspec = None
Write the textual content (self._txt) to .spec file (self.fn).
entailment
def detect_ip(kind):
    """
    Detect IP address.

    kind can be:
    IPV4 - returns IPv4 address
    IPV6_ANY - returns any IPv6 address (no preference)
    IPV6_PUBLIC - returns public IPv6 address
    IPV6_TMP - returns temporary IPV6 address (privacy extensions)

    This function either returns an IP address (str)
    or raises a GetIpException.
    """
    if kind not in (IPV4, IPV6_PUBLIC, IPV6_TMP, IPV6_ANY):
        raise ValueError("invalid kind specified")
    # We create an UDP socket and connect it to a public host.
    # We query the OS to know what our address is.
    # No packet will really be sent since we are using UDP.
    af = socket.AF_INET if kind == IPV4 else socket.AF_INET6
    s = socket.socket(af, socket.SOCK_DGRAM)
    try:
        if kind in [IPV6_PUBLIC, IPV6_TMP, ]:
            # caller wants some specific kind of IPv6 address (not IPV6_ANY)
            try:
                if kind == IPV6_PUBLIC:
                    preference = socket.IPV6_PREFER_SRC_PUBLIC
                elif kind == IPV6_TMP:
                    preference = socket.IPV6_PREFER_SRC_TMP
                s.setsockopt(socket.IPPROTO_IPV6,
                             socket.IPV6_ADDR_PREFERENCES, preference)
            except socket.error as e:
                # setsockopt can fail on kernels without RFC 5014 support
                if e.errno == errno.ENOPROTOOPT:
                    raise GetIpException(
                        "Kernel doesn't support IPv6 address preference")
                else:
                    raise GetIpException(
                        "Unable to set IPv6 address preference: %s" % e)
        try:
            outside_ip = OUTSIDE_IPV4 if kind == IPV4 else OUTSIDE_IPV6
            s.connect((outside_ip, 9))
        except (socket.error, socket.gaierror) as e:
            raise GetIpException(str(e))
        ip = s.getsockname()[0]
    finally:
        # always release the socket, even on failure paths
        s.close()
    return ip
Detect IP address. kind can be: IPV4 - returns IPv4 address IPV6_ANY - returns any IPv6 address (no preference) IPV6_PUBLIC - returns public IPv6 address IPV6_TMP - returns temporary IPV6 address (privacy extensions) This function either returns an IP address (str) or raises a GetIpException.
entailment
def setup_kojiclient(profile):
    """Setup koji client session.

    :param profile: name of the koji config profile to load
    :return: an SSL-authenticated koji ClientSession
    """
    opts = koji.read_config(profile)
    # expand '~' in any string option (cert/serverca paths etc.)
    # NOTE: iteritems() means this code targets Python 2
    for k, v in opts.iteritems():
        opts[k] = os.path.expanduser(v) if type(v) is str else v
    kojiclient = koji.ClientSession(opts['server'], opts=opts)
    kojiclient.ssl_login(opts['cert'], None, opts['serverca'])
    return kojiclient
Setup koji client session
entailment
def retrieve_sources():
    """Retrieve sources using spectool.

    Finds the .spec file in the current directory and downloads its
    sources via `spectool -g`. Returns early (logging where useful) when
    spectool is not installed or no usable .spec file is found.
    """
    spectool = find_executable('spectool')
    if not spectool:
        log.warn('spectool is not installed')
        return
    try:
        specfile = spec_fn()
    except Exception:
        # no (or ambiguous) .spec file -> nothing to do
        return
    cmd = [spectool, "-g", specfile]
    # BUGFIX: pass the argument list directly instead of joining it and
    # using shell=True, so spec file names containing shell
    # metacharacters or spaces cannot be misinterpreted by the shell
    output = subprocess.check_output(cmd)
    log.warn(output)
Retrieve sources using spectool
entailment
def create_srpm(dist='el7'):
    """Create an srpm.

    Requires that sources are available in local directory.

    :param dist: set package dist tag (default: el7)
    :return: path to the built .src.rpm, or None when no .spec is found
    :raises RpmModuleNotAvailable: when the rpm python module is missing
    """
    if not RPM_AVAILABLE:
        raise RpmModuleNotAvailable()
    path = os.getcwd()
    try:
        specfile = spec_fn()
        spec = Spec(specfile)
    except Exception:
        return
    rpmdefines = ["--define 'dist .{}'".format(dist),
                  "--define '_sourcedir {}'".format(path),
                  "--define '_srcrpmdir {}'".format(path)]
    # NOTE(review): '_sourcedir' is set to '.<dist>' here although the
    # rpmbuild define above uses the cwd -- looks suspicious; see FIXME
    rpm.addMacro('_sourcedir', '.{}'.format(dist))
    # FIXME: needs to be fixed in Spec
    rpm.addMacro('dist', '.{}'.format(dist))
    module_name = spec.get_tag('Name', True)
    version = spec.get_tag('Version', True)
    release = spec.get_tag('Release', True)
    srpm = os.path.join(path, "{}-{}-{}.src.rpm".format(module_name,
                                                        version, release))
    # See if we need to build the srpm
    if os.path.exists(srpm):
        log.warn('Srpm found, rewriting it.')
    cmd = ['rpmbuild']
    cmd.extend(rpmdefines)
    cmd.extend(['--nodeps', '-bs', specfile])
    output = subprocess.check_output(' '.join(cmd), shell=True)
    log.warn(output)
    # rpmbuild prints "Wrote: <path>"; take the path token
    srpm = output.split()[1]
    return srpm
Create an srpm Requires that sources are available in local directory dist: set package dist tag (default: el7)
entailment
def compute_auth_key(userid, password):
    """Compute the authentication key for freedns.afraid.org.

    This is the SHA1 hash of the string b'userid|password'.

    :param userid: ascii username
    :param password: ascii password
    :return: ascii authentication key (SHA1 hex digest)
    """
    import sys
    joined = "|".join((userid, password))
    if sys.version_info >= (3, 0):
        # hash the ascii-encoded bytes on Python 3
        return hashlib.sha1(joined.encode("ascii")).hexdigest()  # noqa: S303
    return hashlib.sha1(joined).hexdigest()
Compute the authentication key for freedns.afraid.org. This is the SHA1 hash of the string b'userid|password'. :param userid: ascii username :param password: ascii password :return: ascii authentication key (SHA1 at this point)
entailment
def records(credentials, url="https://freedns.afraid.org/api/"):
    """
    Yield the dynamic DNS records associated with this account.

    :param credentials: an AfraidCredentials instance
    :param url: the service URL
    """
    params = {"action": "getdyndns", "sha": credentials.sha}
    req = requests.get(
        url, params=params, headers=constants.REQUEST_HEADERS_DEFAULT,
        timeout=60)
    # one record per non-empty response line, fields separated by '|'
    for record_line in (line.strip() for line in req.text.splitlines()
                        if len(line.strip()) > 0):
        yield AfraidDynDNSRecord(*record_line.split("|"))
Yield the dynamic DNS records associated with this account. :param credentials: an AfraidCredentials instance :param url: the service URL
entailment
def update(url):
    """
    Update remote DNS record by requesting its special endpoint URL.

    This automatically picks the IP address using the HTTP connection:
    it is not possible to specify the IP address explicitly.

    :param url: URL to retrieve for triggering the update
    :return: IP address (str), or None when the response can't be parsed
    """
    req = requests.get(
        url, headers=constants.REQUEST_HEADERS_DEFAULT, timeout=60)
    req.close()
    # Response must contain an IP address, or else we can't parse it.
    # Also, the IP address in the response is the newly assigned
    # IP address.
    ipregex = re.compile(r"\b(?P<ip>(?:[0-9]{1,3}\.){3}[0-9]{1,3})\b")
    ipmatch = ipregex.search(req.text)
    if ipmatch:
        # normalize through the ipaddress helper before returning
        return str(ipaddress(ipmatch.group("ip")))
    LOG.error("couldn't parse the server's response '%s'", req.text)
    return None
Update remote DNS record by requesting its special endpoint URL. This automatically picks the IP address using the HTTP connection: it is not possible to specify the IP address explicitly. :param url: URL to retrieve for triggering the update :return: IP address
entailment
def sha(self):
    """Return sha, lazily compute if not done yet."""
    if self._sha is None:
        # compute once and cache on the instance
        self._sha = compute_auth_key(self.userid, self.password)
    return self._sha
Return sha, lazily compute if not done yet.
entailment
def update(self, *args, **kwargs): """Update the IP on the remote service.""" # first find the update_url for the provided account + hostname: update_url = next((r.update_url for r in records(self._credentials, self._url) if r.hostname == self.hostname), None) if update_url is None: LOG.warning("Could not find hostname '%s' at '%s'", self.hostname, self._url) return None return update(update_url)
Update the IP on the remote service.
entailment
def register_observer(self, observer, events=None):
    """Register a listener function.

    :param observer: external listener function
    :param events: tuple or list of relevant events (default=None,
        meaning the observer receives all events)
    """
    # normalize a single event into a one-element tuple
    if not (events is None or isinstance(events, (tuple, list))):
        events = (events,)
    if observer in self._observers:
        LOG.warning("Observer '%r' already registered, overwriting for events"
                    " %r", observer, events)
    self._observers[observer] = events
Register a listener function. :param observer: external listener function :param events: tuple or list of relevant events (default=None)
entailment
def notify_observers(self, event=None, msg=None):
    """Notify observers.

    Each observer registered for this event (or for all events) is
    called with (self, event, msg). An observer that raises is
    unregistered so it cannot break future dispatches.
    """
    # iterate over a copy so unregister_observer can mutate the dict
    for observer, events in list(self._observers.items()):
        # LOG.debug("trying to notify the observer")
        if events is None or event is None or event in events:
            try:
                observer(self, event, msg)
            except (Exception,) as ex:  # pylint: disable=broad-except
                # drop the faulty handler and log the failure
                self.unregister_observer(observer)
                errmsg = "Exception in message dispatch: Handler '{0}' unregistered for event '{1}' ".format(
                    observer.__class__.__name__, event)
                LOG.error(errmsg, exc_info=ex)
Notify observers.
entailment
def detect(self):
    """Detect the IP address.

    Uses a public IPv6 address when the configured family is AF_INET6,
    otherwise IPv4. Returns None (after logging) when detection fails.
    """
    if self.opts_family == AF_INET6:
        kind = IPV6_PUBLIC
    else:  # 'INET':
        kind = IPV4
    theip = None
    try:
        theip = detect_ip(kind)
    except GetIpException:
        LOG.exception("socket detector raised an exception:")
    # record the result (possibly None) as the current value
    self.set_current_value(theip)
    return theip
Detect the IP address.
entailment
def _detect(self):
    """Use the netifaces module to detect ifconfig information.

    Picks the first address on the configured interface that parses as
    an IP and (when a netmask is configured) falls inside it; stores the
    result via set_current_value() and returns it (may be None).
    """
    theip = None
    try:
        if self.opts_family == AF_INET6:
            addrlist = netifaces.ifaddresses(self.opts_iface)[netifaces.AF_INET6]
        else:
            addrlist = netifaces.ifaddresses(self.opts_iface)[netifaces.AF_INET]
    except ValueError as exc:
        LOG.error("netifaces choked while trying to get network interface"
                  " information for interface '%s'", self.opts_iface,
                  exc_info=exc)
    else:
        # now we have a list of addresses as returned by netifaces
        for pair in addrlist:
            try:
                detip = ipaddress(pair["addr"])
            except (TypeError, ValueError) as exc:
                LOG.debug("Found invalid IP '%s' on interface '%s'!?",
                          pair["addr"], self.opts_iface, exc_info=exc)
                continue
            if self.netmask is not None:
                # only accept addresses inside the configured netmask
                if detip in self.netmask:
                    theip = pair["addr"]
                else:
                    continue
            else:
                theip = pair["addr"]
            break  # we use the first IP found
    # theip can still be None at this point!
    self.set_current_value(theip)
    return theip
Use the netifaces module to detect ifconfig information.
entailment
def clean_tempdir(context, scenario):
    """Clean up temporary test dirs for passed tests.

    Leave failed test dirs for manual inspection.
    """
    tempdir = getattr(context, 'tempdir', None)
    if not tempdir or scenario.status != 'passed':
        # keep the directory around for debugging failed scenarios
        return
    shutil.rmtree(tempdir)
    del context.tempdir
Clean up temporary test dirs for passed tests. Leave failed test dirs for manual inspection.
entailment
def is_reserved_ip(self, ip):
    """Check if the given ip address is in a reserved ipv4 address space.

    :param ip: ip address
    :return: boolean
    """
    theip = ipaddress(ip)
    # assumes _reserved_netmasks holds network strings parseable by
    # ipnetwork (e.g. "10.0.0.0/8") -- TODO confirm against class setup
    for res in self._reserved_netmasks:
        if theip in ipnetwork(res):
            return True
    return False
Check if the given ip address is in a reserved ipv4 address space. :param ip: ip address :return: boolean
entailment
def random_public_ip(self):
    """Return a randomly generated, public IPv4 address.

    Keeps drawing random addresses until one falls outside every
    reserved network.

    :return: ip address
    """
    randomip = random_ip()
    while self.is_reserved_ip(randomip):
        randomip = random_ip()
    return randomip
Return a randomly generated, public IPv4 address. :return: ip address
entailment
def detect(self):
    """Detect IP and return it.

    NOTE(review): the return statement sits inside the loop, so only
    the first entry of self.rips is consumed per call.
    """
    for theip in self.rips:
        LOG.debug("detected %s", str(theip))
        self.set_current_value(str(theip))
        return str(theip)
Detect IP and return it.
entailment
def update(self, ip):
    """Update the IP on the remote service.

    :param ip: IP address to submit
    :return: the IP on success ("good"/"nochg" responses), the raw
        response text for other 200 responses, or an error string for
        unexpected HTTP status codes
    """
    timeout = 60
    LOG.debug("Updating '%s' to '%s' at service '%s'",
              self.hostname, ip, self._updateurl)
    params = {"myip": ip, "hostname": self.hostname}
    req = requests.get(self._updateurl, params=params,
                       headers=constants.REQUEST_HEADERS_DEFAULT,
                       auth=(self.__userid, self.__password),
                       timeout=timeout)
    LOG.debug("status %i, %s", req.status_code, req.text)
    if req.status_code == 200:
        # responses can also be "nohost", "abuse", "911", "notfqdn"
        if req.text.startswith("good ") or req.text.startswith("nochg"):
            return ip
        return req.text
    return "invalid http status code: %s" % req.status_code
Update the IP on the remote service.
entailment
def patches_base_ref(default=exception.CantGuess):
    """Return a git reference to patches branch base.

    Returns first part of .spec's patches_base if found, otherwise
    returns Version(+%{milestone}).

    :param default: value to return on failure; when left at the
        exception.CantGuess sentinel, the error is raised instead
    """
    ref = None
    try:
        spec = specfile.Spec()
        ref, _ = spec.get_patches_base(expand_macros=True)
        if ref:
            ref, _ = tag2version(ref)
        else:
            # no patches_base -> fall back to Version (+ milestone)
            ref = spec.get_tag('Version', expand_macros=True)
            milestone = spec.get_milestone()
            if milestone:
                ref += milestone
        if not ref:
            raise exception.CantGuess(msg="got empty .spec Version")
    except Exception as ex:
        if default is exception.CantGuess:
            raise exception.CantGuess(
                what="current package version",
                why=str(ex))
        else:
            return default
    # convert the version back into the repo's tag naming style
    tag_style = version_tag_style(ref)
    return version2tag(ref, tag_style=tag_style)
Return a git reference to patches branch base. Returns first part of .spec's patches_base is found, otherwise return Version(+%{milestone}).
entailment
def display_listitems(items, url):
    '''Displays a list of items along with the index to enable a user
    to select an item.

    Special case: a two-item list of '..' plus an already-played item is
    shown via display_video() instead.
    '''
    if (len(items) == 2 and items[0].get_label() == '..' and
            items[1].get_played()):
        display_video(items)
    else:
        label_width = get_max_len(item.get_label() for item in items)
        num_width = len(str(len(items)))
        output = []
        for i, item in enumerate(items):
            output.append('[%s] %s (%s)' % (
                str(i).rjust(num_width),
                item.get_label().ljust(label_width),
                item.get_path()))
        line_width = get_max_len(output)
        output.append('-' * line_width)
        header = [
            '',
            '=' * line_width,
            'Current URL: %s' % url,
            '-' * line_width,
            '%s %s Path' % ('#'.center(num_width + 2),
                            'Label'.ljust(label_width)),
            '-' * line_width,
        ]
        # Python 2 print statement
        print '\n'.join(header + output)
Displays a list of items along with the index to enable a user to select an item.
entailment
def display_video(items):
    '''Prints a message for a playing video and displays the parent
    listitem.

    :param items: two-element sequence of (parent_item, played_item)
    '''
    parent_item, played_item = items
    title_line = 'Playing Media %s (%s)' % (played_item.get_label(),
                                            played_item.get_path())
    parent_line = '[0] %s (%s)' % (parent_item.get_label(),
                                   parent_item.get_path())
    line_width = get_max_len([title_line, parent_line])
    output = [
        '-' * line_width,
        title_line,
        '-' * line_width,
        parent_line,
    ]
    # Python 2 print statement
    print '\n'.join(output)
Prints a message for a playing video and displays the parent listitem.
entailment
def get_user_choice(items):
    '''Returns the selected item from provided items or None if
    'q' was entered for quit.
    '''
    choice = raw_input('Choose an item or "q" to quit: ')
    while choice != 'q':
        try:
            item = items[int(choice)]
            print  # Blank line for readability between interactive views
            return item
        except ValueError:
            # Passed something that couldn't be converted with int()
            choice = raw_input('You entered a non-integer. Choice must be an'
                               ' integer or "q": ')
        except IndexError:
            # Passed an integer that was out of range of the list of urls
            choice = raw_input('You entered an invalid integer. Choice must be'
                               ' from above url list or "q": ')
    return None
Returns the selected item from provided items or None if 'q' was entered for quit.
entailment
def decode(data):
    """
    Decode data employing some charset detection and including unicode
    BOM stripping.

    :param data: byte string (or already-decoded Python 2 unicode)
    :return: unicode text; undecodable bytes are stripped or replaced
    """
    # Already-decoded text passes straight through (Python 2 unicode).
    if isinstance(data, unicode):
        return data
    # Detect standard unicode BOMs.
    for bom, encoding in UNICODE_BOMS:
        if data.startswith(bom):
            return data[len(bom):].decode(encoding, errors='ignore')
    # Try straight UTF-8.
    try:
        return data.decode('utf-8')
    except UnicodeDecodeError:
        pass
    # Test for various common encodings.
    for encoding in COMMON_ENCODINGS:
        try:
            return data.decode(encoding)
        except UnicodeDecodeError:
            pass
    # Anything else gets filtered.
    return NON_ASCII_FILTER.sub('', data).decode('ascii', errors='replace')
Decode data employing some charset detection and including unicode BOM stripping.
entailment
def get_context(self, line=1, column=0):
    'Returns a tuple containing the context for a line'
    line -= 1  # The line is one-based
    # If there is no data in the file, there can be no context.
    datalen = len(self.data)
    if datalen <= line:
        return None
    build = [self.data[line]]
    # Add surrounding lines if they're available. There must always be
    # three elements in the context.
    if line > 0:
        build.insert(0, self.data[line - 1])
    else:
        build.insert(0, None)
    if line < datalen - 1:
        build.append(self.data[line + 1])
    else:
        build.append(None)
    leading_counts = []
    # Count whitespace to determine how much needs to be stripped.
    lstrip_count = INFINITY
    # NOTE(review): this loop reuses the name 'line', shadowing the
    # (already consumed) line-number parameter.
    for line in build:
        # Don't count empty/whitespace-only lines.
        if line is None or not line.strip():
            leading_counts.append(lstrip_count)
            continue
        # Isolate the leading whitespace.
        ws_count = len(line) - len(line.lstrip())
        leading_counts.append(ws_count)
        if ws_count < lstrip_count:
            lstrip_count = ws_count
    # If all of the lines were skipped over, it means everything was
    # whitespace.
    if lstrip_count == INFINITY:
        return ('', '', '')
    for lnum in range(3):
        # Skip edge lines.
        if not build[lnum]:
            continue
        line = build[lnum].strip()
        # Empty lines stay empty.
        if not line:
            build[lnum] = ''
            continue
        line = self._format_line(line, column=column, rel_line=lnum)
        # re-apply the common-prefix-relative indentation
        line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
        build[lnum] = line
    # Return the final output as a tuple.
    return tuple(build)
Returns a tuple containing the context for a line
entailment
def _format_line(self, data, column=0, rel_line=1):
    'Formats a line from the data to be the appropriate length'
    line_length = len(data)
    # Lines longer than 140 chars are trimmed; which end gets trimmed
    # depends on the line's position relative to the error line
    # (rel_line 0 = line above, 1 = error line, 2 = line below).
    if line_length > 140:
        if rel_line == 0:
            # Trim from the beginning
            data = '... %s' % data[-140:]
        elif rel_line == 1:
            # Trim surrounding the error position
            if column < 70:
                data = '%s ...' % data[:140]
            elif column > line_length - 70:
                data = '... %s' % data[-140:]
            else:
                data = '... %s ...' % data[column - 70:column + 70]
        elif rel_line == 2:
            # Trim from the end
            data = '%s ...' % data[:140]
    data = unicodehelper.decode(data)
    return data
Formats a line from the data to be the appropriate length
entailment
def get_line(self, position):
    """Return the 1-based line number containing the given string
    position within self.data (a list of line strings)."""
    total_lines = len(self.data)
    # characters consumed so far; the first line costs its bare length
    consumed = len(self.data[0])
    lineno = 1
    # every further line costs its own length plus one newline character
    while consumed < position and lineno < total_lines:
        consumed += len(self.data[lineno]) + 1
        lineno += 1
    return lineno
Returns the line number that the given string position is found on
entailment
def load_addon_strings(addon, filename):
    '''This is not an official XBMC method, it is here to facilitate
    mocking up the other methods when running outside of XBMC.

    Parses <string id="..."> entries from the given strings XML file
    into addon._strings.
    '''
    def get_strings(fn):
        # map string id -> text for every <string> element in the file
        xml = parse(fn)
        strings = dict((tag.getAttribute('id'), tag.firstChild.data)
                       for tag in xml.getElementsByTagName('string'))
        return strings
    addon._strings = get_strings(filename)
This is not an official XBMC method, it is here to facilitate mocking up the other methods when running outside of XBMC.
entailment
def get_addon_id(addonxml):
    '''Parses the addon id attribute from the given addon.xml filename.'''
    document = parse(addonxml)
    return document.getElementsByTagName('addon')[0].getAttribute('id')
Parses an addon id from the given addon.xml filename.
entailment
def get_addon_name(addonxml):
    '''Parses the addon name attribute from the given addon.xml filename.'''
    document = parse(addonxml)
    return document.getElementsByTagName('addon')[0].getAttribute('name')
Parses an addon name from the given addon.xml filename.
entailment
def _create_dir(path):
    '''Creates necessary directories for the given path or does nothing
    if the directories already exist.
    '''
    try:
        os.makedirs(path)
    except OSError, exc:  # Python 2 except syntax
        # "already exists" is fine; anything else (permissions etc.)
        # is re-raised
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
Creates necessary directories for the given path or does nothing if the directories already exist.
entailment
def translatePath(path):
    '''Creates folders in the OS's temp directory. Doesn't touch any
    possible XBMC installation on the machine. Attempting to do as
    little work as possible to enable this function to work seamlessly.

    :param path: a special:// path, e.g. special://temp/foo
    :return: the corresponding path under TEMP_DIR
    '''
    valid_dirs = ['xbmc', 'home', 'temp', 'masterprofile', 'profile',
                  'subtitles', 'userdata', 'database', 'thumbnails',
                  'recordings', 'screenshots', 'musicplaylists',
                  'videoplaylists', 'cdrips', 'skin', ]
    assert path.startswith('special://'), 'Not a valid special:// path.'
    parts = path.split('/')[2:]
    assert len(parts) > 1, 'Need at least a single root directory'
    assert parts[0] in valid_dirs, '%s is not a valid root dir.' % parts[0]
    # We don't want to swallow any potential IOErrors here, so only makedir for
    # the root dir, the user is responsible for making any further child dirs
    _create_dir(os.path.join(TEMP_DIR, parts[0]))
    return os.path.join(TEMP_DIR, *parts)
Creates folders in the OS's temp directory. Doesn't touch any possible XBMC installation on the machine. Attempting to do as little work as possible to enable this function to work seamlessly.
entailment
def _parse_request(self, url=None, handle=None): '''Handles setup of the plugin state, including request arguments, handle, mode. This method never needs to be called directly. For testing, see plugin.test() ''' # To accomdate self.redirect, we need to be able to parse a full url as # well if url is None: url = sys.argv[0] if len(sys.argv) == 3: url += sys.argv[2] if handle is None: handle = sys.argv[1] return Request(url, handle)
Handles setup of the plugin state, including request arguments, handle, mode. This method never needs to be called directly. For testing, see plugin.test()
entailment
def register_module(self, module, url_prefix):
    '''Registers a module with a plugin. Requires a url_prefix that
    will then enable calls to url_for.

    :param module: Should be an instance `xbmcswift2.Module`.
    :param url_prefix: A url prefix to use for all module urls,
                       e.g. '/mymodule'
    '''
    module._plugin = self
    module._url_prefix = url_prefix
    # replay the module's deferred registrations against this plugin
    for deferred_register in module._register_funcs:
        deferred_register(self, url_prefix)
Registers a module with a plugin. Requires a url_prefix that will then enable calls to url_for. :param module: Should be an instance `xbmcswift2.Module`. :param url_prefix: A url prefix to use for all module urls, e.g. '/mymodule'
entailment
def cached_route(self, url_rule, name=None, options=None, TTL=None):
    '''A decorator to add a route to a view and also apply caching. The
    url_rule, name and options arguments are the same arguments for the
    route function. The TTL argument if given will passed along to the
    caching decorator.
    '''
    route_decorator = self.route(url_rule, name=name, options=options)
    if TTL:
        cache_decorator = self.cached(TTL)
    else:
        cache_decorator = self.cached()

    def new_decorator(func):
        # apply caching first, then routing, so the registered view is
        # the cached wrapper
        return route_decorator(cache_decorator(func))
    return new_decorator
A decorator to add a route to a view and also apply caching. The url_rule, name and options arguments are the same arguments for the route function. The TTL argument if given will passed along to the caching decorator.
entailment
def route(self, url_rule, name=None, options=None):
    '''A decorator to add a route to a view. name is used to
    differentiate when there are multiple routes for a given view.'''
    # TODO: change options kwarg to defaults
    def decorator(view_func):
        # fall back to the function's own name when none is given
        self.add_url_rule(url_rule, view_func,
                          name=name or view_func.__name__,
                          options=options)
        return view_func
    return decorator
A decorator to add a route to a view. name is used to differentiate when there are multiple routes for a given view.
entailment
def add_url_rule(self, url_rule, view_func, name, options=None): '''This method adds a URL rule for routing purposes. The provided name can be different from the view function name if desired. The provided name is what is used in url_for to build a URL. The route decorator provides the same functionality. ''' rule = UrlRule(url_rule, view_func, name, options) if name in self._view_functions.keys(): # TODO: Raise exception for ambiguous views during registration log.warning('Cannot add url rule "%s" with name "%s". There is ' 'already a view with that name', url_rule, name) self._view_functions[name] = None else: log.debug('Adding url rule "%s" named "%s" pointing to function ' '"%s"', url_rule, name, view_func.__name__) self._view_functions[name] = rule self._routes.append(rule)
This method adds a URL rule for routing purposes. The provided name can be different from the view function name if desired. The provided name is what is used in url_for to build a URL. The route decorator provides the same functionality.
entailment
def url_for(self, endpoint, **items): '''Returns a valid XBMC plugin URL for the given endpoint name. endpoint can be the literal name of a function, or it can correspond to the name keyword arguments passed to the route decorator. Raises AmbiguousUrlException if there is more than one possible view for the given endpoint name. ''' try: rule = self._view_functions[endpoint] except KeyError: try: rule = (rule for rule in self._view_functions.values() if rule.view_func == endpoint).next() except StopIteration: raise NotFoundException( '%s doesn\'t match any known patterns.' % endpoint) # rule can be None since values of None are allowed in the # _view_functions dict. This signifies more than one view function is # tied to the same name. if not rule: # TODO: Make this a regular exception raise AmbiguousUrlException pathqs = rule.make_path_qs(items) return 'plugin://%s%s' % (self._addon_id, pathqs)
Returns a valid XBMC plugin URL for the given endpoint name. endpoint can be the literal name of a function, or it can correspond to the name keyword arguments passed to the route decorator. Raises AmbiguousUrlException if there is more than one possible view for the given endpoint name.
entailment
def redirect(self, url): '''Used when you need to redirect to another view, and you only have the final plugin:// url.''' # TODO: Should we be overriding self.request with the new request? new_request = self._parse_request(url=url, handle=self.request.handle) log.debug('Redirecting %s to %s', self.request.path, new_request.path) return self._dispatch(new_request.path)
Used when you need to redirect to another view, and you only have the final plugin:// url.
entailment
def run(self, test=False): '''The main entry point for a plugin.''' self._request = self._parse_request() log.debug('Handling incoming request for %s', self.request.path) items = self._dispatch(self.request.path) # Close any open storages which will persist them to disk if hasattr(self, '_unsynced_storages'): for storage in self._unsynced_storages.values(): log.debug('Saving a %s storage to disk at "%s"', storage.file_format, storage.filename) storage.close() return items
The main entry point for a plugin.
entailment
def main(): '''The entry point for the console script xbmcswift2. The 'xbcmswift2' script is command bassed, so the second argument is always the command to execute. Each command has its own parser options and usages. If no command is provided or the -h flag is used without any other commands, the general help message is shown. ''' parser = OptionParser() if len(sys.argv) == 1: parser.set_usage(USAGE) parser.error('At least one command is required.') # spy sys.argv[1] in order to use correct opts/args command = sys.argv[1] if command == '-h': parser.set_usage(USAGE) opts, args = parser.parse_args() if command not in COMMANDS.keys(): parser.error('Invalid command') # We have a proper command, set the usage and options list according to the # specific command manager = COMMANDS[command] if hasattr(manager, 'option_list'): for args, kwargs in manager.option_list: parser.add_option(*args, **kwargs) if hasattr(manager, 'usage'): parser.set_usage(manager.usage) opts, args = parser.parse_args() # Since we are calling a specific comamnd's manager, we no longer need the # actual command in sys.argv so we slice from position 1 manager.run(opts, args[1:])
The entry point for the console script xbmcswift2. The 'xbcmswift2' script is command bassed, so the second argument is always the command to execute. Each command has its own parser options and usages. If no command is provided or the -h flag is used without any other commands, the general help message is shown.
entailment
def colorize_text(self, text): """Adds escape sequences to colorize text and make it beautiful. To colorize text, prefix the text you want to color with the color (capitalized) wrapped in double angle brackets (i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you don't, it will be done for you (assuming you used a color code in your string.""" # Take note of where the escape sequences are. rnormal = text.rfind('<<NORMAL') rany = text.rfind('<<') # Put in the escape sequences. for color, code in self.colors.items(): text = text.replace('<<%s>>' % color, code) # Make sure that the last sequence is a NORMAL sequence. if rany > -1 and rnormal < rany: text += self.colors['NORMAL'] return text
Adds escape sequences to colorize text and make it beautiful. To colorize text, prefix the text you want to color with the color (capitalized) wrapped in double angle brackets (i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you don't, it will be done for you (assuming you used a color code in your string.
entailment
def write(self, text): 'Uses curses to print in the fanciest way possible.' # Add color to the terminal. if not self.no_color: text = self.colorize_text(text) else: pattern = re.compile('\<\<[A-Z]*?\>\>') text = pattern.sub('', text) text += '\n' self.buffer.write(text) return self
Uses curses to print in the fanciest way possible.
entailment
def say(self, input=None, **kwargs): """Talk to Cleverbot. Arguments: input: The input argument is what you want to say to Cleverbot, such as "hello". tweak1-3: Changes Cleverbot's mood. **kwargs: Keyword arguments to update the request parameters with. Returns: Cleverbot's reply. Raises: APIError: A Cleverbot API error occurred. DecodeError: An error occurred while reading the reply. Timeout: The request timed out. """ params = self._get_params(input, kwargs) try: reply = self.session.get( self.url, params=params, timeout=self.timeout) except requests.Timeout: raise Timeout(self.timeout) else: try: data = reply.json() except ValueError as error: raise DecodeError(error) else: if reply.status_code == 200: self.data = data return data.get('output') else: raise APIError(data.get('error'), data.get('status'))
Talk to Cleverbot. Arguments: input: The input argument is what you want to say to Cleverbot, such as "hello". tweak1-3: Changes Cleverbot's mood. **kwargs: Keyword arguments to update the request parameters with. Returns: Cleverbot's reply. Raises: APIError: A Cleverbot API error occurred. DecodeError: An error occurred while reading the reply. Timeout: The request timed out.
entailment
def live_pidfile(pidfile): # pragma: no cover """(pidfile:str) -> int | None Returns an int found in the named file, if there is one, and if there is a running process with that process id. Return None if no such process exists. """ pid = read_pidfile(pidfile) if pid: try: kill(int(pid), 0) return pid except OSError as e: if e.errno == errno.EPERM: return pid return None
(pidfile:str) -> int | None Returns an int found in the named file, if there is one, and if there is a running process with that process id. Return None if no such process exists.
entailment
def _turn_sigterm_into_systemexit(): # pragma: no cover """ Attempts to turn a SIGTERM exception into a SystemExit exception. """ try: import signal except ImportError: return def handle_term(signo, frame): raise SystemExit signal.signal(signal.SIGTERM, handle_term)
Attempts to turn a SIGTERM exception into a SystemExit exception.
entailment
def wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover """ Entry point for wsgiref's WSGI server Additional parameters: ``certfile``, ``keyfile`` Optional SSL certificate file and host key file names. You can generate self-signed test files as follows: $ openssl genrsa 1024 > keyfile $ chmod 400 keyfile $ openssl req -new -x509 -nodes -sha1 -days 365 \\ -key keyfile > certfile $ chmod 400 certfile The file names should contain full paths. """ from wsgiref.simple_server import make_server, WSGIServer host = kw.get('host', '0.0.0.0') port = int(kw.get('port', 8080)) threaded = asbool(kw.get('wsgiref.threaded', False)) server_class = WSGIServer certfile = kw.get('wsgiref.certfile') keyfile = kw.get('wsgiref.keyfile') scheme = 'http' if certfile and keyfile: """ based on code from nullege: description='Dropbox REST API Client with more consistent responses.', author='Rick van Hattem', author_email='Rick@Wol.ph', url='http://wol.ph/', """ import ssl class SecureWSGIServer(WSGIServer): def get_request(self): socket, client_address = WSGIServer.get_request(self) socket = ssl.wrap_socket(socket, server_side=True, certfile=certfile, keyfile=keyfile) return socket, client_address port = int(kw.get('port', 4443)) server_class = SecureWSGIServer if threaded: from SocketServer import ThreadingMixIn class GearboxWSGIServer(ThreadingMixIn, server_class): pass server_type = 'Threaded' else: class GearboxWSGIServer(server_class): pass server_type = 'Standard' server = make_server(host, port, wsgi_app, server_class=GearboxWSGIServer) if certfile and keyfile: server_type += ' Secure' scheme += 's' ServeCommand.out('Starting %s HTTP server on %s://%s:%s' % (server_type, scheme, host, port)) server.serve_forever()
Entry point for wsgiref's WSGI server Additional parameters: ``certfile``, ``keyfile`` Optional SSL certificate file and host key file names. You can generate self-signed test files as follows: $ openssl genrsa 1024 > keyfile $ chmod 400 keyfile $ openssl req -new -x509 -nodes -sha1 -days 365 \\ -key keyfile > certfile $ chmod 400 certfile The file names should contain full paths.
entailment
def cherrypy_server_runner( app, global_conf=None, host='127.0.0.1', port=None, ssl_pem=None, protocol_version=None, numthreads=None, server_name=None, max=None, request_queue_size=None, timeout=None ): # pragma: no cover """ Entry point for CherryPy's WSGI server Serves the specified WSGI app via CherryPyWSGIServer. ``app`` The WSGI 'application callable'; multiple WSGI applications may be passed as (script_name, callable) pairs. ``host`` This is the ipaddress to bind to (or a hostname if your nameserver is properly configured). This defaults to 127.0.0.1, which is not a public interface. ``port`` The port to run on, defaults to 8080 for HTTP, or 4443 for HTTPS. This can be a string or an integer value. ``ssl_pem`` This an optional SSL certificate file (via OpenSSL) You can generate a self-signed test PEM certificate file as follows: $ openssl genrsa 1024 > host.key $ chmod 400 host.key $ openssl req -new -x509 -nodes -sha1 -days 365 \\ -key host.key > host.cert $ cat host.cert host.key > host.pem $ chmod 400 host.pem ``protocol_version`` The protocol used by the server, by default ``HTTP/1.1``. ``numthreads`` The number of worker threads to create. ``server_name`` The string to set for WSGI's SERVER_NAME environ entry. ``max`` The maximum number of queued requests. (defaults to -1 = no limit). ``request_queue_size`` The 'backlog' argument to socket.listen(); specifies the maximum number of queued connections. ``timeout`` The timeout in seconds for accepted connections. """ is_ssl = False if ssl_pem: port = port or 4443 is_ssl = True if not port: if ':' in host: host, port = host.split(':', 1) else: port = 8080 bind_addr = (host, int(port)) kwargs = {} for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'): var = locals()[var_name] if var is not None: kwargs[var_name] = int(var) server = None try: # Try to import from newer CherryPy releases. 
import cheroot.wsgi as wsgiserver server = wsgiserver.Server(bind_addr, app, server_name=server_name, **kwargs) except ImportError: # Nope. Try to import from older CherryPy releases. # We might just take another ImportError here. Oh well. from cherrypy import wsgiserver server = wsgiserver.CherryPyWSGIServer(bind_addr, app, server_name=server_name, **kwargs) server.ssl_certificate = server.ssl_private_key = ssl_pem if protocol_version: server.protocol = protocol_version try: protocol = is_ssl and 'https' or 'http' if host == '0.0.0.0': print('serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' % (port, protocol, port)) else: print('serving on %s://%s:%s' % (protocol, host, port)) server.start() except (KeyboardInterrupt, SystemExit): server.stop() return server
Entry point for CherryPy's WSGI server Serves the specified WSGI app via CherryPyWSGIServer. ``app`` The WSGI 'application callable'; multiple WSGI applications may be passed as (script_name, callable) pairs. ``host`` This is the ipaddress to bind to (or a hostname if your nameserver is properly configured). This defaults to 127.0.0.1, which is not a public interface. ``port`` The port to run on, defaults to 8080 for HTTP, or 4443 for HTTPS. This can be a string or an integer value. ``ssl_pem`` This an optional SSL certificate file (via OpenSSL) You can generate a self-signed test PEM certificate file as follows: $ openssl genrsa 1024 > host.key $ chmod 400 host.key $ openssl req -new -x509 -nodes -sha1 -days 365 \\ -key host.key > host.cert $ cat host.cert host.key > host.pem $ chmod 400 host.pem ``protocol_version`` The protocol used by the server, by default ``HTTP/1.1``. ``numthreads`` The number of worker threads to create. ``server_name`` The string to set for WSGI's SERVER_NAME environ entry. ``max`` The maximum number of queued requests. (defaults to -1 = no limit). ``request_queue_size`` The 'backlog' argument to socket.listen(); specifies the maximum number of queued connections. ``timeout`` The timeout in seconds for accepted connections.
entailment
def parse_vars(self, args): """ Given variables like ``['a=b', 'c=d']`` turns it into ``{'a': 'b', 'c': 'd'}`` """ result = {} for arg in args: if '=' not in arg: raise ValueError( 'Variable assignment %r invalid (no "=")' % arg) name, value = arg.split('=', 1) result[name] = value return result
Given variables like ``['a=b', 'c=d']`` turns it into ``{'a': 'b', 'c': 'd'}``
entailment
def get_fixed_argv(self): # pragma: no cover """Get proper arguments for re-running the command. This is primarily for fixing some issues under Windows. First, there was a bug in Windows when running an executable located at a path with a space in it. This has become a non-issue with current versions of Python and Windows, so we don't take measures like adding quotes or calling win32api.GetShortPathName() as was necessary in former times. Second, depending on whether gearbox was installed as an egg or a wheel under Windows, it is run as a .py or an .exe stub. In the first case, we need to run it through the interpreter. On other operating systems, we can re-run the command as is. """ argv = sys.argv[:] if sys.platform == 'win32' and argv[0].endswith('.py'): argv.insert(0, sys.executable) return argv
Get proper arguments for re-running the command. This is primarily for fixing some issues under Windows. First, there was a bug in Windows when running an executable located at a path with a space in it. This has become a non-issue with current versions of Python and Windows, so we don't take measures like adding quotes or calling win32api.GetShortPathName() as was necessary in former times. Second, depending on whether gearbox was installed as an egg or a wheel under Windows, it is run as a .py or an .exe stub. In the first case, we need to run it through the interpreter. On other operating systems, we can re-run the command as is.
entailment
def main(): 'Main function. Handles delegation to other functions.' logging.basicConfig() type_choices = {'any': constants.PACKAGE_ANY, 'extension': constants.PACKAGE_EXTENSION, 'theme': constants.PACKAGE_THEME, 'dictionary': constants.PACKAGE_DICTIONARY, 'languagepack': constants.PACKAGE_LANGPACK, 'search': constants.PACKAGE_SEARCHPROV, 'multi': constants.PACKAGE_MULTI} # Parse the arguments that parser = argparse.ArgumentParser( description='Run tests on a Mozilla-type addon.') parser.add_argument('package', help="The path of the package you're testing") parser.add_argument('-t', '--type', default='any', choices=type_choices.keys(), help="Type of addon you assume you're testing", required=False) parser.add_argument('-o', '--output', default='text', choices=('text', 'json'), help='The output format that you expect', required=False) parser.add_argument('-v', '--verbose', action='store_const', const=True, help="""If the output format supports it, makes the analysis summary include extra info.""") parser.add_argument('--boring', action='store_const', const=True, help="""Activating this flag will remove color support from the terminal.""") parser.add_argument('--determined', action='store_const', const=True, help="""This flag will continue running tests in successive tests even if a lower tier fails.""") parser.add_argument('--selfhosted', action='store_const', const=True, help="""Indicates that the addon will not be hosted on addons.mozilla.org. This allows the <em:updateURL> element to be set.""") parser.add_argument('--approved_applications', help="""A JSON file containing acceptable applications and their versions""", required=False) parser.add_argument('--target-maxversion', help="""JSON string to override the package's targetapp_maxVersion for validation. The JSON object should be a dict of versions keyed by application GUID. 
For example, setting a package's max Firefox version to 5.*: {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": "5.*"} """) parser.add_argument('--target-minversion', help="""JSON string to override the package's targetapp_minVersion for validation. The JSON object should be a dict of versions keyed by application GUID. For example, setting a package's min Firefox version to 5.*: {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": "5.*"} """) parser.add_argument('--for-appversions', help="""JSON string to run validation tests for compatibility with a specific app/version. The JSON object should be a dict of version lists keyed by application GUID. For example, running Firefox 6.* compatibility tests: {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": ["6.*"]} """) parser.add_argument('--timeout', help='The amount of time before validation is ' 'terminated with a timeout exception.', default='60') args = parser.parse_args() # We want to make sure that the output is expected. Parse out the expected # type for the add-on and pass it in for validation. if args.type not in type_choices: # Fail if the user provided invalid input. print 'Given expectation (%s) not valid. See --help for details' % \ args.type sys.exit(1) overrides = {} if args.target_minversion: overrides['targetapp_minVersion'] = json.loads(args.target_minversion) if args.target_maxversion: overrides['targetapp_maxVersion'] = json.loads(args.target_maxversion) for_appversions = None if args.for_appversions: for_appversions = json.loads(args.for_appversions) try: timeout = int(args.timeout) except ValueError: print 'Invalid timeout. Integer expected.' sys.exit(1) expectation = type_choices[args.type] error_bundle = validate(args.package, format=None, approved_applications=args.approved_applications, determined=args.determined, listed=not args.selfhosted, overrides=overrides, for_appversions=for_appversions, expectation=expectation, timeout=timeout) # Print the output of the tests based on the requested format. 
if args.output == 'text': print error_bundle.print_summary(verbose=args.verbose, no_color=args.boring).encode('utf-8') elif args.output == 'json': sys.stdout.write(error_bundle.render_json()) if error_bundle.failed(): sys.exit(1) else: sys.exit(0)
Main function. Handles delegation to other functions.
entailment
def package_contents(self): 'Returns a dictionary of file information' if self.contents_cache: return self.contents_cache # Get a list of ZipInfo objects. files = self.zf.infolist() out_files = {} # Iterate through each file in the XPI. for file_ in files: file_doc = {'name': file_.filename, 'size': file_.file_size, 'name_lower': file_.filename.lower()} file_doc['extension'] = file_doc['name_lower'].split('.')[-1] out_files[file_.filename] = file_doc self.contents_cache = out_files return out_files
Returns a dictionary of file information
entailment
def write(self, name, data): """Write a blob of data to the XPI manager.""" if isinstance(data, StringIO): self.zf.writestr(name, data.getvalue()) else: self.zf.writestr(name, to_utf8(data))
Write a blob of data to the XPI manager.
entailment
def write_file(self, name, path=None): """Write the contents of a file from the disk to the XPI.""" if path is None: path = name self.zf.write(path, name)
Write the contents of a file from the disk to the XPI.
entailment
def xbmc_url(url, **options): '''Appends key/val pairs to the end of a URL. Useful for passing arbitrary HTTP headers to XBMC to be used when fetching a media resource, e.g. cookies. ''' optionstring = urllib.urlencode(options) if optionstring: return url + '|' + optionstring return url
Appends key/val pairs to the end of a URL. Useful for passing arbitrary HTTP headers to XBMC to be used when fetching a media resource, e.g. cookies.
entailment
def enum(*args, **kwargs): '''An enum class to mirror XBMC constatns. All args and kwargs.keys are added as atrrs on the returned object. >>> States = enum('NEW_JERSEY', NY='NEW_YORK') >>> States.NY 'NEW_YORK' >>> States.NEW_JERSEY 'NEW_JERSEY' >>> States._fields ['NY', 'NEW_JERSEY'] ''' kwargs.update((arg, arg) for arg in args) kwargs['_fields'] = kwargs.keys() return type('Enum', (), kwargs)
An enum class to mirror XBMC constatns. All args and kwargs.keys are added as atrrs on the returned object. >>> States = enum('NEW_JERSEY', NY='NEW_YORK') >>> States.NY 'NEW_YORK' >>> States.NEW_JERSEY 'NEW_JERSEY' >>> States._fields ['NY', 'NEW_JERSEY']
entailment
def clean_dict(dct): '''Returns a dict where items with a None value are removed''' return dict((key, val) for key, val in dct.items() if val is not None)
Returns a dict where items with a None value are removed
entailment
def pickle_dict(items): '''Returns a new dictionary where values which aren't instances of basestring are pickled. Also, a new key '_pickled' contains a comma separated list of keys corresponding to the pickled values. ''' ret = {} pickled_keys = [] for key, val in items.items(): if isinstance(val, basestring): ret[key] = val else: pickled_keys.append(key) ret[key] = pickle.dumps(val) if pickled_keys: ret['_pickled'] = ','.join(pickled_keys) return ret
Returns a new dictionary where values which aren't instances of basestring are pickled. Also, a new key '_pickled' contains a comma separated list of keys corresponding to the pickled values.
entailment
def unpickle_args(items): '''Takes a dict and unpickles values whose keys are found in '_pickled' key. >>> unpickle_args({'_pickled': ['foo']. 'foo': ['I3%0A.']}) {'foo': 3} ''' # Technically there can be more than one _pickled value. At this point # we'll just use the first one pickled= items.pop('_pickled', None) if pickled is None: return items pickled_keys = pickled[0].split(',') ret = {} for key, vals in items.items(): if key in pickled_keys: ret[key] = [pickle.loads(val) for val in vals] else: ret[key] = vals return ret
Takes a dict and unpickles values whose keys are found in '_pickled' key. >>> unpickle_args({'_pickled': ['foo']. 'foo': ['I3%0A.']}) {'foo': 3}
entailment
def unpickle_dict(items): '''Returns a dict pickled with pickle_dict''' pickled_keys = items.pop('_pickled', '').split(',') ret = {} for key, val in items.items(): if key in pickled_keys: ret[key] = pickle.loads(val) else: ret[key] = val return ret
Returns a dict pickled with pickle_dict
entailment
def download_page(url, data=None): '''Returns the response for the given url. The optional data argument is passed directly to urlopen.''' conn = urllib2.urlopen(url, data) resp = conn.read() conn.close() return resp
Returns the response for the given url. The optional data argument is passed directly to urlopen.
entailment
def unhex(inp): '''unquote(r'abc\x20def') -> 'abc def'.''' res = inp.split(r'\x') for i in xrange(1, len(res)): item = res[i] try: res[i] = _hextochr[item[:2]] + item[2:] except KeyError: res[i] = '%' + item except UnicodeDecodeError: res[i] = unichr(int(item[:2], 16)) + item[2:] return ''.join(res)
unquote(r'abc\x20def') -> 'abc def'.
entailment
def load_commands(self, namespace): """Load all the commands from an entrypoint""" for ep in pkg_resources.iter_entry_points(namespace): LOG.debug('found command %r', ep.name) cmd_name = (ep.name.replace('_', ' ') if self.convert_underscores else ep.name) self.commands[cmd_name] = ep return
Load all the commands from an entrypoint
entailment
def find_command(self, argv): """Given an argument list, find a command and return the processor and any remaining arguments. """ search_args = argv[:] name = '' while search_args: if search_args[0].startswith('-'): name = '%s %s' % (name, search_args[0]) raise ValueError('Invalid command %r' % name) next_val = search_args.pop(0) name = '%s %s' % (name, next_val) if name else next_val if name in self.commands: cmd_ep = self.commands[name] if hasattr(cmd_ep, 'resolve'): cmd_factory = cmd_ep.resolve() else: # NOTE(dhellmann): Some fake classes don't take # require as an argument. Yay? arg_spec = inspect.getargspec(cmd_ep.load) if 'require' in arg_spec[0]: cmd_factory = cmd_ep.load(require=False) else: cmd_factory = cmd_ep.load() return (cmd_factory, name, search_args) else: raise ValueError('Unknown command %r' % next(iter(argv), ''))
Given an argument list, find a command and return the processor and any remaining arguments.
entailment
def setup_logging(config_uri, fileConfig=fileConfig, configparser=configparser): """ Set up logging via the logging module's fileConfig function with the filename specified via ``config_uri`` (a string in the form ``filename#sectionname``). ConfigParser defaults are specified for the special ``__file__`` and ``here`` variables, similar to PasteDeploy config loading. """ path, _ = _getpathsec(config_uri, None) parser = configparser.ConfigParser() parser.read([path]) if parser.has_section('loggers'): config_file = os.path.abspath(path) config_options = dict( __file__=config_file, here=os.path.dirname(config_file) ) fileConfig(config_file, config_options, disable_existing_loggers=False)
Set up logging via the logging module's fileConfig function with the filename specified via ``config_uri`` (a string in the form ``filename#sectionname``). ConfigParser defaults are specified for the special ``__file__`` and ``here`` variables, similar to PasteDeploy config loading.
entailment
def prepare_package(err, path, expectation=0, for_appversions=None, timeout=-1): """Prepares a file-based package for validation. timeout is the number of seconds before validation is aborted. If timeout is -1 then no timeout checking code will run. """ package = None try: # Test that the package actually exists. I consider this Tier 0 # since we may not even be dealing with a real file. if not os.path.isfile(path): err.error(('main', 'prepare_package', 'not_found'), 'The package could not be found') return # Pop the package extension. package_extension = os.path.splitext(path)[1] package_extension = package_extension.lower() def timeout_handler(signum, frame): raise validator.ValidationTimeout(timeout) if timeout != -1: signal.signal(signal.SIGALRM, timeout_handler) signal.setitimer(signal.ITIMER_REAL, timeout) if package_extension == '.xml': test_search(err, path, expectation) elif package_extension not in ('.xpi', '.jar'): err.error(('main', 'prepare_package', 'unrecognized'), 'The package is not of a recognized type.') else: package = open(path, 'rb') test_package(err, package, path, expectation, for_appversions) err.metadata['is_extension'] = err.detected_type == PACKAGE_EXTENSION except validator.ValidationTimeout: err.system_error( msg_id='validation_timeout', message='Validation has timed out', signing_severity='high', description=('Validation was unable to complete in the allotted ' 'time. This is most likely due to the size or ' 'complexity of your add-on.', 'This timeout has been logged, but please consider ' 'filing an issue report here: ' 'https://bit.ly/1POrYYU'), exc_info=sys.exc_info()) except Exception: err.system_error(exc_info=sys.exc_info()) finally: # Remove timers and signal handlers regardless of whether # we've completed successfully or the timer has fired. if timeout != -1: signal.setitimer(signal.ITIMER_REAL, 0) signal.signal(signal.SIGALRM, signal.SIG_DFL) if package: package.close() decorator.cleanup()
Prepares a file-based package for validation. timeout is the number of seconds before validation is aborted. If timeout is -1 then no timeout checking code will run.
entailment
def populate_chrome_manifest(err, xpi_package): "Loads the chrome.manifest if it's present" if 'chrome.manifest' in xpi_package: chrome_data = xpi_package.read('chrome.manifest') chrome = ChromeManifest(chrome_data, 'chrome.manifest') chrome_recursion_buster = set() # Handle the case of manifests linked from the manifest. def get_linked_manifest(path, from_path, from_chrome, from_triple): if path in chrome_recursion_buster: err.warning( err_id=('submain', 'populate_chrome_manifest', 'recursion'), warning='Linked manifest recursion detected.', description='A chrome registration file links back to ' 'itself. This can cause a multitude of ' 'issues.', filename=path) return # Make sure the manifest is properly linked if path not in xpi_package: err.notice( err_id=('submain', 'populate_chrome_manifest', 'linkerr'), notice='Linked manifest could not be found.', description=('A linked manifest file could not be found ' 'in the package.', 'Path: %s' % path), filename=from_path, line=from_triple['line'], context=from_chrome.context) return chrome_recursion_buster.add(path) manifest = ChromeManifest(xpi_package.read(path), path) for triple in manifest.triples: yield triple if triple['subject'] == 'manifest': subpath = triple['predicate'] # If the path is relative, make it relative to the current # file. if not subpath.startswith('/'): subpath = '%s/%s' % ( '/'.join(path.split('/')[:-1]), subpath) subpath = subpath.lstrip('/') for subtriple in get_linked_manifest( subpath, path, manifest, triple): yield subtriple chrome_recursion_buster.discard(path) chrome_recursion_buster.add('chrome.manifest') # Search for linked manifests in the base manifest. for extra_manifest in chrome.get_triples(subject='manifest'): # When one is found, add its triples to our own. 
for triple in get_linked_manifest(extra_manifest['predicate'], 'chrome.manifest', chrome, extra_manifest): chrome.triples.append(triple) chrome_recursion_buster.discard('chrome.manifest') # Create a reference so we can get the chrome manifest later, but make # it pushable so we don't run chrome manifests in JAR files. err.save_resource('chrome.manifest', chrome, pushable=True) # Create a non-pushable reference for tests that need to access the # chrome manifest from within JAR files. err.save_resource('chrome.manifest_nopush', chrome, pushable=False)
Loads the chrome.manifest if it's present
entailment
def detect_type(err, install_rdf=None, xpi_package=None):
    """Determine the type of add-on being validated.

    The decision is based on install.rdf (the ``<em:type>`` node), the
    package's file extension, and heuristics such as the presence of a
    ``dictionaries/`` folder.

    :param err: error bundle that collects notices/errors and resources.
    :param install_rdf: parsed install.rdf document, or None if absent.
    :param xpi_package: the add-on package (iterable of file paths, with
        an ``extension`` attribute).
    :return: one of the ``PACKAGE_*`` constants, or None when the type
        cannot be determined.
    """

    # The types in the install.rdf don't pair up 1:1 with the type
    # system that we're using for expectations and the like. This is
    # to help translate between the two.
    translated_types = {'2': PACKAGE_EXTENSION,
                        '4': PACKAGE_THEME,
                        '8': PACKAGE_LANGPACK,
                        '32': PACKAGE_MULTI,
                        '64': PACKAGE_DICTIONARY,
                        # New "experiment" types: see bug 1220097 and
                        # https://github.com/mozilla/addons-server/issues/3315
                        '128': PACKAGE_EXTENSION,
                        '256': PACKAGE_EXTENSION}

    # If we're missing our install.rdf file, we can try to make some
    # assumptions.
    if install_rdf is None:
        types = {'xpi': PACKAGE_DICTIONARY}

        err.notice(('typedetection',
                    'detect_type',
                    'missing_install_rdf'),
                   'install.rdf was not found.',
                   'The type should be determined by install.rdf if present. '
                   "If it isn't, we still need to know the type.")

        # If we know what the file type might be, return it.
        if xpi_package.extension in types:
            return types[xpi_package.extension]
        # Otherwise, we're out of luck :(
        else:
            return None

    # Attempt to locate the <em:type> node in the RDF doc.
    type_uri = install_rdf.uri('type')
    type_ = install_rdf.get_object(None, type_uri)

    # Dictionaries are weird too, they might not have the obligatory
    # em:type. We can assume that if they have a /dictionaries/ folder,
    # they are a dictionary because even if they aren't, dictionaries
    # have an extraordinarily strict set of rules and file filters that
    # must be passed. It's so crazy secure that it's cool if we use it
    # as kind of a fallback.
    if any(file_ for file_ in xpi_package
           if file_.startswith('dictionaries/')):
        if type_ != '64':
            err.error(('typedetection',
                       'dictionary_valid_type',
                       'invalid_em_type'),
                      'Invalid <em:type> value.',
                      'The package appears to be a dictionary but does not '
                      'have the correct <em:type> set in the install '
                      'manifest.')
        return PACKAGE_DICTIONARY

    if type_ is not None:
        if type_ in translated_types:
            err.save_resource('is_multipackage', type_ == '32',
                              pushable=True)
            # Make sure we translate back to the normalized version
            return translated_types[type_]
        else:
            # BUGFIX: the message previously claimed only 2, 4, 8, and 32
            # were valid even though 64 (dictionary) and the experiment
            # types 128/256 are accepted by translated_types above.
            err.error(('typedetection',
                       'detect_type',
                       'invalid_em_type'),
                      'Invalid <em:type> value.',
                      'The only valid values for <em:type> are 2, 4, 8, 32, '
                      '64, 128, and 256. Any other values are either '
                      'invalid or deprecated.',
                      'install.rdf')
            return
    else:
        err.notice(
            err_id=('typedetection', 'detect_type', 'no_em:type'),
            notice='No <em:type> element found in install.rdf',
            description="It isn't always required, but it is the most "
                        'reliable method for determining add-on type.',
            filename='install.rdf')

    # There's no type element, so the spec says that it's either a
    # theme or an extension. At this point, we know that it isn't
    # a dictionary, language pack, or multiple extension pack.
    extensions = {'jar': '4',
                  'xpi': '2'}

    # If the package's extension is listed in the [tiny] extension
    # dictionary, then just return that. We'll validate against that
    # add-on type's layout later. Better to false positive than to false
    # negative.
    if xpi_package.extension in extensions:
        # Make sure it gets translated back to the normalized version
        install_rdf_type = extensions[xpi_package.extension]
        return translated_types[install_rdf_type]
Determines the type of add-on being validated based on install.rdf, file extension, and other properties.
entailment
def detect_opensearch(err, package, listed=False):
    """Detect, parse, and validate an OpenSearch provider.

    :param err: error bundle to which all validation problems are
        reported.
    :param package: either an open file-like object or a filesystem path
        to the OpenSearch XML document.
    :param listed: when True, apply AMO-specific restrictions (e.g. no
        ``rel="self"`` Url elements).
    :return: the error bundle *err*.
    """

    # Parse the file.
    try:
        # Check if it is a file object.
        if hasattr(package, 'read'):
            srch_prov = parse(package)
        else:
            # It's not a file object; open it (the XML parser is bad at
            # this).
            with open(package, 'rb') as package_file:
                srch_prov = parse(package_file)
    except DefusedXmlException:
        url = 'https://pypi.python.org/pypi/defusedxml/0.3#attack-vectors'
        err.error(
            err_id=('opensearch', 'security_error'),
            error='OpenSearch: XML Security Error',
            description='The OpenSearch extension could not be parsed due to '
                        'a security error in the XML. See {url} for more '
                        'info.'.format(url=url))
        return err
    except ExpatError:
        err.error(
            err_id=('opensearch', 'parse_error'),
            error='OpenSearch: XML Parse Error',
            description='The OpenSearch extension could not be parsed due to '
                        'a syntax error in the XML.')
        return err

    # Make sure that the root element is OpenSearchDescription.
    if srch_prov.documentElement.tagName != 'OpenSearchDescription':
        err.error(
            err_id=('opensearch', 'invalid_document_root'),
            error='OpenSearch: Invalid Document Root',
            description='The root element of the OpenSearch provider is not '
                        "'OpenSearchDescription'.")

    # Per bug 617822
    if not srch_prov.documentElement.hasAttribute('xmlns'):
        err.error(
            err_id=('opensearch', 'no_xmlns'),
            error='OpenSearch: Missing XMLNS attribute',
            description='The XML namespace attribute is missing from the '
                        'OpenSearch document.')
    if ('xmlns' not in srch_prov.documentElement.attributes.keys() or
            srch_prov.documentElement.attributes['xmlns'].value not in (
                'http://a9.com/-/spec/opensearch/1.0/',
                'http://a9.com/-/spec/opensearch/1.1/',
                'http://a9.com/-/spec/opensearchdescription/1.1/',
                'http://a9.com/-/spec/opensearchdescription/1.0/')):
        err.error(
            err_id=('opensearch', 'invalid_xmlns'),
            error='OpenSearch: Bad XMLNS attribute',
            # BUGFIX: the message previously read "contains an value." —
            # the word "invalid" was missing.
            description='The XML namespace attribute contains an invalid '
                        'value.')

    # Make sure that there is exactly one ShortName.
    sn = srch_prov.documentElement.getElementsByTagName('ShortName')
    if not sn:
        err.error(
            err_id=('opensearch', 'missing_shortname'),
            error='OpenSearch: Missing <ShortName> elements',
            description='ShortName elements are mandatory OpenSearch '
                        'provider elements.')
    elif len(sn) > 1:
        err.error(
            err_id=('opensearch', 'extra_shortnames'),
            error='OpenSearch: Too many <ShortName> elements',
            description='Too many ShortName elements exist in the OpenSearch '
                        'provider.')
    else:
        # Count only the text content; a ShortName longer than 16
        # characters is invalid.
        sn_children = sn[0].childNodes
        short_name = 0
        for node in sn_children:
            if node.nodeType == node.TEXT_NODE:
                short_name += len(node.data)
        if short_name > 16:
            err.error(
                err_id=('opensearch', 'big_shortname'),
                error='OpenSearch: <ShortName> element too long',
                description='The ShortName element must contains less than '
                            'seventeen characters.')

    # Make sure that there is exactly one Description.
    if len(srch_prov.documentElement
           .getElementsByTagName('Description')) != 1:
        err.error(
            err_id=('opensearch', 'missing_description'),
            error='OpenSearch: Invalid number of <Description> elements',
            description='There are too many or too few Description elements '
                        'in the OpenSearch provider.')

    # Grab the URLs and make sure that there is at least one.
    urls = srch_prov.documentElement.getElementsByTagName('Url')
    if not urls:
        err.error(
            err_id=('opensearch', 'missing_url'),
            error='OpenSearch: Missing <Url> elements',
            description='The OpenSearch provider is missing a Url element.')

    if listed and any(url.hasAttribute('rel') and
                      url.attributes['rel'].value == 'self'
                      for url in urls):
        err.error(
            err_id=('opensearch', 'rel_self'),
            error='OpenSearch: <Url> elements may not be rel=self',
            description='Per AMO guidelines, OpenSearch providers cannot '
                        "contain <Url /> elements with a 'rel' attribute "
                        "pointing to the URL's current location. It must be "
                        'removed before posting this provider to AMO.')

    acceptable_mimes = ('text/html', 'application/xhtml+xml')
    acceptable_urls = [url for url in urls
                       if url.hasAttribute('type') and
                       url.attributes['type'].value in acceptable_mimes]

    # At least one Url must be text/html
    if not acceptable_urls:
        err.error(
            err_id=('opensearch', 'missing_url_texthtml'),
            error="OpenSearch: Missing <Url> element with 'text/html' type",
            description='OpenSearch providers must have at least one Url '
                        "element with a type attribute set to 'text/html'.")

    # Make sure that each Url has the require attributes.
    for url in acceptable_urls:

        # rel=self entries point at the provider itself; skip validation.
        if url.hasAttribute('rel') and url.attributes['rel'].value == 'self':
            continue

        if url.hasAttribute('method') and \
           url.attributes['method'].value.upper() not in ('GET', 'POST'):
            err.error(
                err_id=('opensearch', 'missing_method'),
                error="OpenSearch: <Url> element with invalid 'method'",
                description='A Url element in the OpenSearch provider lists '
                            'a method attribute, but the value is not GET '
                            'or POST.')

        # Test for attribute presence.
        if not url.hasAttribute('template'):
            err.error(
                err_id=('opensearch', 'missing_template'),
                error='OpenSearch: <Url> element missing template attribute',
                description='<Url> elements of OpenSearch providers must '
                            'include a template attribute.')
        else:
            url_template = url.attributes['template'].value
            if url_template[:4] != 'http':
                err.error(
                    err_id=('opensearch', 'invalid_template'),
                    error='OpenSearch: `<Url>` element with invalid '
                          '`template`',
                    description='A `<Url>` element in the OpenSearch '
                                'provider lists a template attribute, but '
                                'the value is not a valid HTTP URL.')

            # Make sure that there is a {searchTerms} placeholder in the
            # URL template.
            found_template = url_template.count('{searchTerms}') > 0

            # If we didn't find it in a simple parse of the template=""
            # attribute, look deeper at the <Param /> elements.
            if not found_template:
                for param in url.getElementsByTagName('Param'):
                    # As long as we're in here and dependent on the
                    # attributes, we'd might as well validate them.
                    attribute_keys = param.attributes.keys()
                    if 'name' not in attribute_keys or \
                       'value' not in attribute_keys:
                        err.error(
                            err_id=('opensearch', 'param_missing_attrs'),
                            error='OpenSearch: `<Param>` element missing '
                                  'name/value',
                            description='Param elements in the OpenSearch '
                                        'provider must include a name and a '
                                        'value attribute.')

                    param_value = (param.attributes['value'].value if
                                   'value' in param.attributes.keys()
                                   else '')
                    if param_value.count('{searchTerms}'):
                        found_template = True

                        # Since we're in a validating spirit, continue
                        # looking for more errors and don't break

            # If the template still hasn't been found...
            if not found_template:
                tpl = url.attributes['template'].value
                err.error(
                    err_id=('opensearch', 'template_not_found'),
                    error='OpenSearch: <Url> element missing template '
                          'placeholder',
                    description=('`<Url>` elements of OpenSearch providers '
                                 'must include a template attribute or '
                                 'specify a placeholder with '
                                 '`{searchTerms}`.',
                                 'Missing template: %s' % tpl))

    # Make sure there are no updateURL elements
    if srch_prov.getElementsByTagName('updateURL'):
        err.error(
            err_id=('opensearch', 'banned_updateurl'),
            error='OpenSearch: <updateURL> elements are banned in OpenSearch '
                  'providers.',
            description='OpenSearch providers may not contain <updateURL> '
                        'elements.')

    # The OpenSearch provider is valid!
    return err
Detect, parse, and validate an OpenSearch provider
entailment
def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0,
             sub_vars=True, interactive=False, overwrite=True,
             template_renderer=None, out_=sys.stdout):
    """
    Copies the ``source`` directory to the ``dest`` directory.

    ``vars``: A dictionary of variables to use in any substitutions.

    ``verbosity``: Higher numbers will show more about what is happening.

    ``simulate``: If true, then don't actually *do* anything.

    ``indent``: Indent any messages by this amount.

    ``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+``
    in filenames will be substituted.

    ``overwrite``: If false, then don't ever overwrite anything.

    ``interactive``: If you are overwriting a file and interactive is
    true, then ask before overwriting.

    ``template_renderer``: This is a function for rendering templates (if you
    don't want to use string.Template).  It should have the signature
    ``template_renderer(content_as_string, vars_as_dict, filename=filename)``.
    """
    def out(msg):
        # Write one progress line to the configured output stream.
        out_.write(msg)
        out_.write('\n')
        out_.flush()

    # This allows you to use a leading +dot+ in filenames which would
    # otherwise be skipped because leading dots make the file hidden:
    vars.setdefault('dot', '.')
    vars.setdefault('plus', '+')
    # A tuple source means (package_name, resource_path) resolved through
    # pkg_resources instead of the filesystem.
    use_pkg_resources = isinstance(source, tuple)
    if use_pkg_resources:
        names = sorted(pkg_resources.resource_listdir(source[0], source[1]))
    else:
        names = sorted(os.listdir(source))
    pad = ' '*(indent*2)
    if not os.path.exists(dest):
        if verbosity >= 1:
            out('%sCreating %s/' % (pad, dest))
        if not simulate:
            makedirs(dest, verbosity=verbosity, pad=pad)
    elif verbosity >= 2:
        out('%sDirectory %s exists' % (pad, dest))
    for name in names:
        if use_pkg_resources:
            full = '/'.join([source[1], name])
        else:
            full = os.path.join(source, name)
        reason = should_skip_file(name)
        if reason:
            if verbosity >= 2:
                reason = pad + reason % {'filename': full}
                out(reason)
            continue  # pragma: no cover
        if sub_vars:
            dest_full = os.path.join(dest, substitute_filename(name, vars))
        else:
            # BUGFIX: dest_full was previously only assigned inside the
            # sub_vars branch, raising NameError when sub_vars was False.
            dest_full = os.path.join(dest, name)
        sub_file = False
        if dest_full.endswith('_tmpl'):
            dest_full = dest_full[:-5]
            sub_file = sub_vars
        if use_pkg_resources and pkg_resources.resource_isdir(source[0],
                                                              full):
            if verbosity:
                out('%sRecursing into %s' % (pad, os.path.basename(full)))
            copy_dir((source[0], full), dest_full, vars, verbosity, simulate,
                     indent=indent+1,
                     sub_vars=sub_vars, interactive=interactive,
                     template_renderer=template_renderer, out_=out_)
            continue
        elif not use_pkg_resources and os.path.isdir(full):
            if verbosity:
                out('%sRecursing into %s' % (pad, os.path.basename(full)))
            copy_dir(full, dest_full, vars, verbosity, simulate,
                     indent=indent+1,
                     sub_vars=sub_vars, interactive=interactive,
                     template_renderer=template_renderer, out_=out_)
            continue
        elif use_pkg_resources:
            content = pkg_resources.resource_string(source[0], full)
        else:
            # Use a context manager so the handle is closed even on error.
            with open(full, 'rb') as f:
                content = f.read()
        if sub_file:
            try:
                content = substitute_content(
                    content, vars, filename=full,
                    template_renderer=template_renderer
                )
            except SkipTemplate:
                continue  # pragma: no cover
            if content is None:
                continue  # pragma: no cover
        already_exists = os.path.exists(dest_full)
        if already_exists:
            with open(dest_full, 'rb') as f:
                old_content = f.read()
            if old_content == content:
                if verbosity:
                    out('%s%s already exists (same content)' %
                        (pad, dest_full))
                continue  # pragma: no cover
            if interactive:
                if not query_interactive(
                        native_(full, fsenc), native_(dest_full, fsenc),
                        native_(content, fsenc), native_(old_content, fsenc),
                        simulate=simulate, out_=out_):
                    continue
            elif not overwrite:
                continue  # pragma: no cover
        if verbosity and use_pkg_resources:
            out('%sCopying %s to %s' % (pad, full, dest_full))
        elif verbosity:
            out('%sCopying %s to %s' % (pad, os.path.basename(full),
                                        dest_full))
        if not simulate:
            with open(dest_full, 'wb') as f:
                f.write(content)
Copies the ``source`` directory to the ``dest`` directory. ``vars``: A dictionary of variables to use in any substitutions. ``verbosity``: Higher numbers will show more about what is happening. ``simulate``: If true, then don't actually *do* anything. ``indent``: Indent any messages by this amount. ``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+`` in filenames will be substituted. ``overwrite``: If false, then don't ever overwrite anything. ``interactive``: If you are overwriting a file and interactive is true, then ask before overwriting. ``template_renderer``: This is a function for rendering templates (if you don't want to use string.Template). It should have the signature ``template_renderer(content_as_string, vars_as_dict, filename=filename)``.
entailment
def should_skip_file(name): """ Checks if a file should be skipped based on its name. If it should be skipped, returns the reason, otherwise returns None. """ if name.startswith('.'): return 'Skipping hidden file %(filename)s' if name.endswith('~') or name.endswith('.bak'): return 'Skipping backup file %(filename)s' if name.endswith('.pyc') or name.endswith('.pyo'): return 'Skipping %s file ' % os.path.splitext(name)[1] + '%(filename)s' if name.endswith('$py.class'): return 'Skipping $py.class file %(filename)s' if name in ('CVS', '_darcs'): return 'Skipping version control directory %(filename)s' return None
Checks if a file should be skipped based on its name. If it should be skipped, returns the reason, otherwise returns None.
entailment
def setup_options(opts): '''Takes any actions necessary based on command line options''' if opts.quiet: logger.log.setLevel(logging.WARNING) logger.GLOBAL_LOG_LEVEL = logging.WARNING if opts.verbose: logger.log.setLevel(logging.DEBUG) logger.GLOBAL_LOG_LEVEL = logging.DEBUG
Takes any actions necessary based on command line options
entailment
def get_addon_module_name(addonxml_filename): '''Attempts to extract a module name for the given addon's addon.xml file. Looks for the 'xbmc.python.pluginsource' extension node and returns the addon's filename without the .py suffix. ''' try: xml = ET.parse(addonxml_filename).getroot() except IOError: sys.exit('Cannot find an addon.xml file in the current working ' 'directory. Please run this command from the root directory ' 'of an addon.') try: plugin_source = (ext for ext in xml.findall('extension') if ext.get('point') == 'xbmc.python.pluginsource').next() except StopIteration: sys.exit('ERROR, no pluginsource in addonxml') return plugin_source.get('library').split('.')[0]
Attempts to extract a module name for the given addon's addon.xml file. Looks for the 'xbmc.python.pluginsource' extension node and returns the addon's filename without the .py suffix.
entailment
def patch_plugin(plugin, path, handle=None): '''Patches a few attributes of a plugin instance to enable a new call to plugin.run() ''' if handle is None: handle = plugin.request.handle patch_sysargv(path, handle) plugin._end_of_directory = False
Patches a few attributes of a plugin instance to enable a new call to plugin.run()
entailment
def once(plugin, parent_stack=None): '''A run mode for the CLI that runs the plugin once and exits.''' plugin.clear_added_items() items = plugin.run() # if update_listing=True, we need to remove the last url from the parent # stack if parent_stack and plugin._update_listing: del parent_stack[-1] # if we have parent items, include the most recent in the display if parent_stack: items.insert(0, parent_stack[-1]) display_listitems(items, plugin.request.url) return items
A run mode for the CLI that runs the plugin once and exits.
entailment
def interactive(plugin): '''A run mode for the CLI that runs the plugin in a loop based on user input. ''' items = [item for item in once(plugin) if not item.get_played()] parent_stack = [] # Keep track of parents so we can have a '..' option selected_item = get_user_choice(items) while selected_item is not None: if parent_stack and selected_item == parent_stack[-1]: # User selected the parent item, remove from list parent_stack.pop() else: # User selected non parent item, add current url to parent stack parent_stack.append(ListItem.from_dict(label='..', path=plugin.request.url)) patch_plugin(plugin, selected_item.get_path()) items = [item for item in once(plugin, parent_stack=parent_stack) if not item.get_played()] selected_item = get_user_choice(items)
A run mode for the CLI that runs the plugin in a loop based on user input.
entailment
def crawl(plugin): '''Performs a breadth-first crawl of all possible routes from the starting path. Will only visit a URL once, even if it is referenced multiple times in a plugin. Requires user interaction in between each fetch. ''' # TODO: use OrderedSet? paths_visited = set() paths_to_visit = set(item.get_path() for item in once(plugin)) while paths_to_visit and continue_or_quit(): path = paths_to_visit.pop() paths_visited.add(path) # Run the new listitem patch_plugin(plugin, path) new_paths = set(item.get_path() for item in once(plugin)) # Filter new items by checking against urls_visited and # urls_tovisit paths_to_visit.update(path for path in new_paths if path not in paths_visited)
Performs a breadth-first crawl of all possible routes from the starting path. Will only visit a URL once, even if it is referenced multiple times in a plugin. Requires user interaction in between each fetch.
entailment
def run(opts, args): '''The run method for the 'run' command. Executes a plugin from the command line. ''' setup_options(opts) mode = Modes.ONCE if len(args) > 0 and hasattr(Modes, args[0].upper()): _mode = args.pop(0).upper() mode = getattr(Modes, _mode) url = None if len(args) > 0: # A url was specified url = args.pop(0) plugin_mgr = PluginManager.load_plugin_from_addonxml(mode, url) plugin_mgr.run()
The run method for the 'run' command. Executes a plugin from the command line.
entailment
def load_plugin_from_addonxml(cls, mode, url): '''Attempts to import a plugin's source code and find an instance of :class:`~xbmcswif2.Plugin`. Returns an instance of PluginManager if succesful. ''' cwd = os.getcwd() sys.path.insert(0, cwd) module_name = get_addon_module_name(os.path.join(cwd, 'addon.xml')) addon = __import__(module_name) # Find the first instance of xbmcswift2.Plugin try: plugin = (attr_value for attr_value in vars(addon).values() if isinstance(attr_value, Plugin)).next() except StopIteration: sys.exit('Could\'t find a Plugin instance in %s.py' % module_name) return cls(plugin, mode, url)
Attempts to import a plugin's source code and find an instance of :class:`~xbmcswif2.Plugin`. Returns an instance of PluginManager if succesful.
entailment
def run(self): '''This method runs the the plugin in the appropriate mode parsed from the command line options. ''' handle = 0 handlers = { Modes.ONCE: once, Modes.CRAWL: crawl, Modes.INTERACTIVE: interactive, } handler = handlers[self.mode] patch_sysargv(self.url or 'plugin://%s/' % self.plugin.id, handle) return handler(self.plugin)
This method runs the the plugin in the appropriate mode parsed from the command line options.
entailment
def _setup_config(self, dist, filename, section, vars, verbosity): """ Called to setup an application, given its configuration file/directory. The default implementation calls ``package.websetup.setup_config(command, filename, section, vars)`` or ``package.websetup.setup_app(command, config, vars)`` With ``setup_app`` the ``config`` object is a dictionary with the extra attributes ``global_conf``, ``local_conf`` and ``filename`` """ modules = [line.strip() for line in dist.get_metadata_lines('top_level.txt') if line.strip() and not line.strip().startswith('#')] if not modules: print('No modules are listed in top_level.txt') print('Try running python setup.py egg_info to regenerate that file') for mod_name in modules: mod_name = mod_name + '.websetup' try: mod = self._import_module(mod_name) except ImportError as e: print(e) desc = getattr(e, 'args', ['No module named websetup'])[0] if not desc.startswith('No module named websetup'): raise mod = None if mod is None: continue if hasattr(mod, 'setup_app'): if verbosity: print('Running setup_app() from %s' % mod_name) self._call_setup_app(mod.setup_app, filename, section, vars) elif hasattr(mod, 'setup_config'): if verbosity: print('Running setup_config() from %s' % mod_name) mod.setup_config(None, filename, section, vars) else: print('No setup_app() or setup_config() function in %s (%s)' % (mod.__name__, mod.__file__))
Called to setup an application, given its configuration file/directory. The default implementation calls ``package.websetup.setup_config(command, filename, section, vars)`` or ``package.websetup.setup_app(command, config, vars)`` With ``setup_app`` the ``config`` object is a dictionary with the extra attributes ``global_conf``, ``local_conf`` and ``filename``
entailment
def _import_module(self, s): """ Import a module. """ mod = __import__(s) parts = s.split('.') for part in parts[1:]: mod = getattr(mod, part) return mod
Import a module.
entailment
def say(self, input=None, **kwargs): """Talk to Cleverbot. Arguments: input: The input argument is what you want to say to Cleverbot, such as "hello". tweak1-3: Changes Cleverbot's mood. **kwargs: Keyword arguments to update the request parameters with. Returns: Cleverbot's reply. Raises: APIError: A Cleverbot API error occurred. DecodeError: An error occurred while reading the reply. Timeout: The request timed out. """ params = self._get_params(input, kwargs) try: reply = yield from self.session.get( self.url, params=params, timeout=self.timeout) except asyncio.TimeoutError: raise Timeout(self.timeout) else: try: data = yield from reply.json() except ValueError as error: raise DecodeError(error) else: if reply.status == 200: self.data = data return data.get('output') else: raise APIError(data.get('error'), data.get('status'))
Talk to Cleverbot. Arguments: input: The input argument is what you want to say to Cleverbot, such as "hello". tweak1-3: Changes Cleverbot's mood. **kwargs: Keyword arguments to update the request parameters with. Returns: Cleverbot's reply. Raises: APIError: A Cleverbot API error occurred. DecodeError: An error occurred while reading the reply. Timeout: The request timed out.
entailment
def conversation(self, name=None, **kwargs): """Make a new conversation. Arguments: name: The key for the dictionary the conversation will be stored as in conversations. If None the conversation will be stored as a list instead. Mixing both types results in an error. **kwargs: Keyword arguments to pass into the new conversation. These accept the same arguments as Cleverbot. Returns: The new conversation. """ convo = Conversation(self, **kwargs) super().conversation(name, convo) return convo
Make a new conversation. Arguments: name: The key for the dictionary the conversation will be stored as in conversations. If None the conversation will be stored as a list instead. Mixing both types results in an error. **kwargs: Keyword arguments to pass into the new conversation. These accept the same arguments as Cleverbot. Returns: The new conversation.
entailment