id
stringlengths
28
33
content
stringlengths
14
265k
max_stars_repo_path
stringlengths
49
55
crossvul-python_data_good_3500_4
import os from subprocess import Popen, PIPE import Bcfg2.Server.Plugin # for debugging output only import logging logger = logging.getLogger('Bcfg2.Plugins.Svn') class Svn(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Version): """Svn is a version plugin for dealing with Bcfg2 repos.""" name = 'Svn' __version__ = '$Id$' __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) self.core = core self.datastore = datastore # path to svn directory for bcfg2 repo svn_dir = "%s/.svn" % datastore # Read revision from bcfg2 repo if os.path.isdir(svn_dir): self.get_revision() else: logger.error("%s is not a directory" % svn_dir) raise Bcfg2.Server.Plugin.PluginInitError logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir) def get_revision(self): """Read svn revision information for the Bcfg2 repository.""" try: data = Popen(("env LC_ALL=C svn info %s" % pipes.quote(self.datastore)), shell=True, stdout=PIPE).communicate()[0].split('\n') return [line.split(': ')[1] for line in data \ if line[:9] == 'Revision:'][-1] except IndexError: logger.error("Failed to read svn info; disabling svn support") logger.error('''Ran command "svn info %s"''' % (self.datastore)) logger.error("Got output: %s" % data) raise Bcfg2.Server.Plugin.PluginInitError
./CrossVul/dataset_final_sorted/CWE-20/py/good_3500_4
crossvul-python_data_bad_1065_1
# Copyright (C) 2002-2007 Python Software Foundation # Contact: email-sig@python.org """Email address parsing code. Lifted directly from rfc822.py. This should eventually be rewritten. """ __all__ = [ 'mktime_tz', 'parsedate', 'parsedate_tz', 'quote', ] import time, calendar SPACE = ' ' EMPTYSTRING = '' COMMASPACE = ', ' # Parse a date field _monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'] _daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] # The timezone table does not include the military time zones defined # in RFC822, other than Z. According to RFC1123, the description in # RFC822 gets the signs wrong, so we can't rely on any such time # zones. RFC1123 recommends that numeric timezone indicators be used # instead of timezone names. _timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) 'EST': -500, 'EDT': -400, # Eastern 'CST': -600, 'CDT': -500, # Central 'MST': -700, 'MDT': -600, # Mountain 'PST': -800, 'PDT': -700 # Pacific } def parsedate_tz(data): """Convert a date string to a time tuple. Accounts for military timezones. """ res = _parsedate_tz(data) if not res: return if res[9] is None: res[9] = 0 return tuple(res) def _parsedate_tz(data): """Convert date to extended time tuple. The last (additional) element is the time zone offset in seconds, except if the timezone was specified as -0000. In that case the last element is None. This indicates a UTC timestamp that explicitly declaims knowledge of the source timezone, as opposed to a +0000 timestamp that indicates the source timezone really was UTC. """ if not data: return data = data.split() # The FWS after the comma after the day-of-week is optional, so search and # adjust for this. if data[0].endswith(',') or data[0].lower() in _daynames: # There's a dayname here. 
Skip it del data[0] else: i = data[0].rfind(',') if i >= 0: data[0] = data[0][i+1:] if len(data) == 3: # RFC 850 date, deprecated stuff = data[0].split('-') if len(stuff) == 3: data = stuff + data[1:] if len(data) == 4: s = data[3] i = s.find('+') if i == -1: i = s.find('-') if i > 0: data[3:] = [s[:i], s[i:]] else: data.append('') # Dummy tz if len(data) < 5: return None data = data[:5] [dd, mm, yy, tm, tz] = data mm = mm.lower() if mm not in _monthnames: dd, mm = mm, dd.lower() if mm not in _monthnames: return None mm = _monthnames.index(mm) + 1 if mm > 12: mm -= 12 if dd[-1] == ',': dd = dd[:-1] i = yy.find(':') if i > 0: yy, tm = tm, yy if yy[-1] == ',': yy = yy[:-1] if not yy[0].isdigit(): yy, tz = tz, yy if tm[-1] == ',': tm = tm[:-1] tm = tm.split(':') if len(tm) == 2: [thh, tmm] = tm tss = '0' elif len(tm) == 3: [thh, tmm, tss] = tm elif len(tm) == 1 and '.' in tm[0]: # Some non-compliant MUAs use '.' to separate time elements. tm = tm[0].split('.') if len(tm) == 2: [thh, tmm] = tm tss = 0 elif len(tm) == 3: [thh, tmm, tss] = tm else: return None try: yy = int(yy) dd = int(dd) thh = int(thh) tmm = int(tmm) tss = int(tss) except ValueError: return None # Check for a yy specified in two-digit format, then convert it to the # appropriate four-digit format, according to the POSIX standard. RFC 822 # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822) # mandates a 4-digit yy. For more information, see the documentation for # the time module. if yy < 100: # The year is between 1969 and 1999 (inclusive). if yy > 68: yy += 1900 # The year is between 2000 and 2068 (inclusive). 
else: yy += 2000 tzoffset = None tz = tz.upper() if tz in _timezones: tzoffset = _timezones[tz] else: try: tzoffset = int(tz) except ValueError: pass if tzoffset==0 and tz.startswith('-'): tzoffset = None # Convert a timezone offset into seconds ; -0500 -> -18000 if tzoffset: if tzoffset < 0: tzsign = -1 tzoffset = -tzoffset else: tzsign = 1 tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) # Daylight Saving Time flag is set to -1, since DST is unknown. return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset] def parsedate(data): """Convert a time string to a time tuple.""" t = parsedate_tz(data) if isinstance(t, tuple): return t[:9] else: return t def mktime_tz(data): """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.""" if data[9] is None: # No zone info, so localtime is better assumption than GMT return time.mktime(data[:8] + (-1,)) else: t = calendar.timegm(data) return t - data[9] def quote(str): """Prepare string to be used in a quoted string. Turns backslash and double quote characters into quoted pairs. These are the only characters that need to be quoted inside a quoted string. Does not add the surrounding double quotes. """ return str.replace('\\', '\\\\').replace('"', '\\"') class AddrlistClass: """Address parser class by Ben Escoto. To understand what this class does, it helps to have a copy of RFC 2822 in front of you. Note: this class interface is deprecated and may be removed in the future. Use email.utils.AddressList instead. """ def __init__(self, field): """Initialize a new instance. `field' is an unparsed address header field, containing one or more addresses. """ self.specials = '()<>@,:;.\"[]' self.pos = 0 self.LWS = ' \t' self.CR = '\r\n' self.FWS = self.LWS + self.CR self.atomends = self.specials + self.LWS + self.CR # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it # is obsolete syntax. RFC 2822 requires that we recognize obsolete # syntax, so allow dots in phrases. 
self.phraseends = self.atomends.replace('.', '') self.field = field self.commentlist = [] def gotonext(self): """Skip white space and extract comments.""" wslist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS + '\n\r': if self.field[self.pos] not in '\n\r': wslist.append(self.field[self.pos]) self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) else: break return EMPTYSTRING.join(wslist) def getaddrlist(self): """Parse all addresses. Returns a list containing all of the addresses. """ result = [] while self.pos < len(self.field): ad = self.getaddress() if ad: result += ad else: result.append(('', '')) return result def getaddress(self): """Parse the next address.""" self.commentlist = [] self.gotonext() oldpos = self.pos oldcl = self.commentlist plist = self.getphraselist() self.gotonext() returnlist = [] if self.pos >= len(self.field): # Bad email address technically, no domain. if plist: returnlist = [(SPACE.join(self.commentlist), plist[0])] elif self.field[self.pos] in '.@': # email address is just an addrspec # this isn't very efficient since we start over self.pos = oldpos self.commentlist = oldcl addrspec = self.getaddrspec() returnlist = [(SPACE.join(self.commentlist), addrspec)] elif self.field[self.pos] == ':': # address is a group returnlist = [] fieldlen = len(self.field) self.pos += 1 while self.pos < len(self.field): self.gotonext() if self.pos < fieldlen and self.field[self.pos] == ';': self.pos += 1 break returnlist = returnlist + self.getaddress() elif self.field[self.pos] == '<': # Address is a phrase then a route addr routeaddr = self.getrouteaddr() if self.commentlist: returnlist = [(SPACE.join(plist) + ' (' + ' '.join(self.commentlist) + ')', routeaddr)] else: returnlist = [(SPACE.join(plist), routeaddr)] else: if plist: returnlist = [(SPACE.join(self.commentlist), plist[0])] elif self.field[self.pos] in self.specials: self.pos += 1 self.gotonext() if self.pos < len(self.field) 
and self.field[self.pos] == ',': self.pos += 1 return returnlist def getrouteaddr(self): """Parse a route address (Return-path value). This method just skips all the route stuff and returns the addrspec. """ if self.field[self.pos] != '<': return expectroute = False self.pos += 1 self.gotonext() adlist = '' while self.pos < len(self.field): if expectroute: self.getdomain() expectroute = False elif self.field[self.pos] == '>': self.pos += 1 break elif self.field[self.pos] == '@': self.pos += 1 expectroute = True elif self.field[self.pos] == ':': self.pos += 1 else: adlist = self.getaddrspec() self.pos += 1 break self.gotonext() return adlist def getaddrspec(self): """Parse an RFC 2822 addr-spec.""" aslist = [] self.gotonext() while self.pos < len(self.field): preserve_ws = True if self.field[self.pos] == '.': if aslist and not aslist[-1].strip(): aslist.pop() aslist.append('.') self.pos += 1 preserve_ws = False elif self.field[self.pos] == '"': aslist.append('"%s"' % quote(self.getquote())) elif self.field[self.pos] in self.atomends: if aslist and not aslist[-1].strip(): aslist.pop() break else: aslist.append(self.getatom()) ws = self.gotonext() if preserve_ws and ws: aslist.append(ws) if self.pos >= len(self.field) or self.field[self.pos] != '@': return EMPTYSTRING.join(aslist) aslist.append('@') self.pos += 1 self.gotonext() return EMPTYSTRING.join(aslist) + self.getdomain() def getdomain(self): """Get the complete domain name from an address.""" sdlist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] == '[': sdlist.append(self.getdomainliteral()) elif self.field[self.pos] == '.': self.pos += 1 sdlist.append('.') elif self.field[self.pos] in self.atomends: break else: sdlist.append(self.getatom()) return EMPTYSTRING.join(sdlist) def getdelimited(self, beginchar, endchars, allowcomments=True): """Parse a header fragment 
delimited by special characters. `beginchar' is the start character for the fragment. If self is not looking at an instance of `beginchar' then getdelimited returns the empty string. `endchars' is a sequence of allowable end-delimiting characters. Parsing stops when one of these is encountered. If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed within the parsed fragment. """ if self.field[self.pos] != beginchar: return '' slist = [''] quote = False self.pos += 1 while self.pos < len(self.field): if quote: slist.append(self.field[self.pos]) quote = False elif self.field[self.pos] in endchars: self.pos += 1 break elif allowcomments and self.field[self.pos] == '(': slist.append(self.getcomment()) continue # have already advanced pos from getcomment elif self.field[self.pos] == '\\': quote = True else: slist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(slist) def getquote(self): """Get a quote-delimited fragment from self's field.""" return self.getdelimited('"', '"\r', False) def getcomment(self): """Get a parenthesis-delimited fragment from self's field.""" return self.getdelimited('(', ')\r', True) def getdomainliteral(self): """Parse an RFC 2822 domain-literal.""" return '[%s]' % self.getdelimited('[', ']\r', False) def getatom(self, atomends=None): """Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).""" atomlist = [''] if atomends is None: atomends = self.atomends while self.pos < len(self.field): if self.field[self.pos] in atomends: break else: atomlist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(atomlist) def getphraselist(self): """Parse a sequence of RFC 2822 phrases. A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. 
Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. """ plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist class AddressList(AddrlistClass): """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" def __init__(self, field): AddrlistClass.__init__(self, field) if field: self.addresslist = self.getaddrlist() else: self.addresslist = [] def __len__(self): return len(self.addresslist) def __add__(self, other): # Set union newaddr = AddressList(None) newaddr.addresslist = self.addresslist[:] for x in other.addresslist: if not x in self.addresslist: newaddr.addresslist.append(x) return newaddr def __iadd__(self, other): # Set union, in-place for x in other.addresslist: if not x in self.addresslist: self.addresslist.append(x) return self def __sub__(self, other): # Set difference newaddr = AddressList(None) for x in self.addresslist: if not x in other.addresslist: newaddr.addresslist.append(x) return newaddr def __isub__(self, other): # Set difference, in-place for x in other.addresslist: if x in self.addresslist: self.addresslist.remove(x) return self def __getitem__(self, index): # Make indexing, slices, and 'in' work return self.addresslist[index]
./CrossVul/dataset_final_sorted/CWE-20/py/bad_1065_1
crossvul-python_data_bad_652_0
from __future__ import unicode_literals from itertools import chain import re import string import six from xml.sax.saxutils import unescape import html5lib from html5lib.constants import ( entities, namespaces, prefixes, tokenTypes, ) try: from html5lib.constants import ReparseException except ImportError: # html5lib-python 1.0 changed the name from html5lib.constants import _ReparseException as ReparseException from html5lib.filters.base import Filter from html5lib.filters import sanitizer from html5lib.serializer import HTMLSerializer from html5lib._tokenizer import HTMLTokenizer from html5lib._trie import Trie from bleach.utils import alphabetize_attributes, force_unicode #: Trie of html entity string -> character representation ENTITIES_TRIE = Trie(entities) #: List of allowed tags ALLOWED_TAGS = [ 'a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'strong', 'ul', ] #: Map of allowed attributes by tag ALLOWED_ATTRIBUTES = { 'a': ['href', 'title'], 'abbr': ['title'], 'acronym': ['title'], } #: List of allowed styles ALLOWED_STYLES = [] #: List of allowed protocols ALLOWED_PROTOCOLS = ['http', 'https', 'mailto'] AMP_SPLIT_RE = re.compile('(&)') #: Invisible characters--0 to and including 31 except 9 (tab), 10 (lf), and 13 (cr) INVISIBLE_CHARACTERS = ''.join([chr(c) for c in chain(range(0, 9), range(11, 13), range(14, 32))]) #: Regexp for characters that are invisible INVISIBLE_CHARACTERS_RE = re.compile( '[' + INVISIBLE_CHARACTERS + ']', re.UNICODE ) #: String to replace invisible characters with. This can be a character, a #: string, or even a function that takes a Python re matchobj INVISIBLE_REPLACEMENT_CHAR = '?' class BleachHTMLTokenizer(HTMLTokenizer): def consumeEntity(self, allowedChar=None, fromAttribute=False): # We don't want to consume and convert entities, so this overrides the # html5lib tokenizer's consumeEntity so that it's now a no-op. # # However, when that gets called, it's consumed an &, so we put that in # the steam. 
if fromAttribute: self.currentToken['data'][-1][1] += '&' else: self.tokenQueue.append({"type": tokenTypes['Characters'], "data": '&'}) class BleachHTMLParser(html5lib.HTMLParser): def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): # Override HTMLParser so we can swap out the tokenizer for our own. self.innerHTMLMode = innerHTML self.container = container self.scripting = scripting self.tokenizer = BleachHTMLTokenizer(stream, parser=self, **kwargs) self.reset() try: self.mainLoop() except ReparseException: self.reset() self.mainLoop() class Cleaner(object): """Cleaner for cleaning HTML fragments of malicious content This cleaner is a security-focused function whose sole purpose is to remove malicious content from a string such that it can be displayed as content in a web page. This cleaner is not designed to use to transform content to be used in non-web-page contexts. To use:: from bleach.sanitizer import Cleaner cleaner = Cleaner() for text in all_the_yucky_things: sanitized = cleaner.clean(text) """ def __init__(self, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, styles=ALLOWED_STYLES, protocols=ALLOWED_PROTOCOLS, strip=False, strip_comments=True, filters=None): """Initializes a Cleaner :arg list tags: allowed list of tags; defaults to ``bleach.sanitizer.ALLOWED_TAGS`` :arg dict attributes: allowed attributes; can be a callable, list or dict; defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES`` :arg list styles: allowed list of css styles; defaults to ``bleach.sanitizer.ALLOWED_STYLES`` :arg list protocols: allowed list of protocols for links; defaults to ``bleach.sanitizer.ALLOWED_PROTOCOLS`` :arg bool strip: whether or not to strip disallowed elements :arg bool strip_comments: whether or not to strip HTML comments :arg list filters: list of html5lib Filter classes to pass streamed content through .. seealso:: http://html5lib.readthedocs.io/en/latest/movingparts.html#filters .. 
Warning:: Using filters changes the output of ``bleach.Cleaner.clean``. Make sure the way the filters change the output are secure. """ self.tags = tags self.attributes = attributes self.styles = styles self.protocols = protocols self.strip = strip self.strip_comments = strip_comments self.filters = filters or [] self.parser = BleachHTMLParser(namespaceHTMLElements=False) self.walker = html5lib.getTreeWalker('etree') self.serializer = BleachHTMLSerializer( quote_attr_values='always', omit_optional_tags=False, escape_lt_in_attrs=True, # We want to leave entities as they are without escaping or # resolving or expanding resolve_entities=False, # Bleach has its own sanitizer, so don't use the html5lib one sanitize=False, # Bleach sanitizer alphabetizes already, so don't use the html5lib one alphabetical_attributes=False, ) def clean(self, text): """Cleans text and returns sanitized result as unicode :arg str text: text to be cleaned :returns: sanitized text as unicode :raises TypeError: if ``text`` is not a text type """ if not isinstance(text, six.string_types): message = "argument cannot be of '{name}' type, must be of text type".format( name=text.__class__.__name__) raise TypeError(message) if not text: return u'' text = force_unicode(text) dom = self.parser.parseFragment(text) filtered = BleachSanitizerFilter( source=self.walker(dom), # Bleach-sanitizer-specific things attributes=self.attributes, strip_disallowed_elements=self.strip, strip_html_comments=self.strip_comments, # html5lib-sanitizer things allowed_elements=self.tags, allowed_css_properties=self.styles, allowed_protocols=self.protocols, allowed_svg_properties=[], ) # Apply any filters after the BleachSanitizerFilter for filter_class in self.filters: filtered = filter_class(source=filtered) return self.serializer.render(filtered) def attribute_filter_factory(attributes): """Generates attribute filter function for the given attributes value The attributes value can take one of several shapes. 
This returns a filter function appropriate to the attributes value. One nice thing about this is that there's less if/then shenanigans in the ``allow_token`` method. """ if callable(attributes): return attributes if isinstance(attributes, dict): def _attr_filter(tag, attr, value): if tag in attributes: attr_val = attributes[tag] if callable(attr_val): return attr_val(tag, attr, value) if attr in attr_val: return True if '*' in attributes: attr_val = attributes['*'] if callable(attr_val): return attr_val(tag, attr, value) return attr in attr_val return False return _attr_filter if isinstance(attributes, list): def _attr_filter(tag, attr, value): return attr in attributes return _attr_filter raise ValueError('attributes needs to be a callable, a list or a dict') def match_entity(stream): """Returns first entity in stream or None if no entity exists Note: For Bleach purposes, entities must start with a "&" and end with a ";". :arg stream: the character stream :returns: ``None`` or the entity string without "&" or ";" """ # Nix the & at the beginning if stream[0] != '&': raise ValueError('Stream should begin with "&"') stream = stream[1:] stream = list(stream) possible_entity = '' end_characters = '<&=;' + string.whitespace # Handle number entities if stream and stream[0] == '#': possible_entity = '#' stream.pop(0) if stream and stream[0] in ('x', 'X'): allowed = '0123456789abcdefABCDEF' possible_entity += stream.pop(0) else: allowed = '0123456789' # FIXME(willkg): Do we want to make sure these are valid number # entities? This doesn't do that currently. 
while stream and stream[0] not in end_characters: c = stream.pop(0) if c not in allowed: break possible_entity += c if possible_entity and stream and stream[0] == ';': return possible_entity return None # Handle character entities while stream and stream[0] not in end_characters: c = stream.pop(0) if not ENTITIES_TRIE.has_keys_with_prefix(possible_entity): break possible_entity += c if possible_entity and stream and stream[0] == ';': return possible_entity return None def next_possible_entity(text): """Takes a text and generates a list of possible entities :arg text: the text to look at :returns: generator where each part (except the first) starts with an "&" """ for i, part in enumerate(AMP_SPLIT_RE.split(text)): if i == 0: yield part elif i % 2 == 0: yield '&' + part class BleachSanitizerFilter(sanitizer.Filter): """html5lib Filter that sanitizes text This filter can be used anywhere html5lib filters can be used. """ def __init__(self, source, attributes=ALLOWED_ATTRIBUTES, strip_disallowed_elements=False, strip_html_comments=True, **kwargs): """Creates a BleachSanitizerFilter instance :arg Treewalker source: stream :arg list tags: allowed list of tags; defaults to ``bleach.sanitizer.ALLOWED_TAGS`` :arg dict attributes: allowed attributes; can be a callable, list or dict; defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES`` :arg list styles: allowed list of css styles; defaults to ``bleach.sanitizer.ALLOWED_STYLES`` :arg list protocols: allowed list of protocols for links; defaults to ``bleach.sanitizer.ALLOWED_PROTOCOLS`` :arg bool strip_disallowed_elements: whether or not to strip disallowed elements :arg bool strip_html_comments: whether or not to strip HTML comments """ self.attr_filter = attribute_filter_factory(attributes) self.strip_disallowed_elements = strip_disallowed_elements self.strip_html_comments = strip_html_comments return super(BleachSanitizerFilter, self).__init__(source, **kwargs) def __iter__(self): for token in Filter.__iter__(self): ret = 
self.sanitize_token(token) if not ret: continue if isinstance(ret, list): for subtoken in ret: yield subtoken else: yield ret def sanitize_token(self, token): """Sanitize a token either by HTML-encoding or dropping. Unlike sanitizer.Filter, allowed_attributes can be a dict of {'tag': ['attribute', 'pairs'], 'tag': callable}. Here callable is a function with two arguments of attribute name and value. It should return true of false. Also gives the option to strip tags instead of encoding. :arg dict token: token to sanitize :returns: token or list of tokens """ token_type = token['type'] if token_type in ['StartTag', 'EndTag', 'EmptyTag']: if token['name'] in self.allowed_elements: return self.allow_token(token) elif self.strip_disallowed_elements: return None else: if 'data' in token: # Alphabetize the attributes before calling .disallowed_token() # so that the resulting string is stable token['data'] = alphabetize_attributes(token['data']) return self.disallowed_token(token) elif token_type == 'Comment': if not self.strip_html_comments: return token else: return None elif token_type == 'Characters': return self.sanitize_characters(token) else: return token def sanitize_characters(self, token): """Handles Characters tokens Our overridden tokenizer doesn't do anything with entities. However, that means that the serializer will convert all ``&`` in Characters tokens to ``&amp;``. Since we don't want that, we extract entities here and convert them to Entity tokens so the serializer will let them be. 
:arg token: the Characters token to work on :returns: a list of tokens """ data = token.get('data', '') if not data: return token data = INVISIBLE_CHARACTERS_RE.sub(INVISIBLE_REPLACEMENT_CHAR, data) token['data'] = data # If there isn't a & in the data, we can return now if '&' not in data: return token new_tokens = [] # For each possible entity that starts with a "&", we try to extract an # actual entity and re-tokenize accordingly for part in next_possible_entity(data): if not part: continue if part.startswith('&'): entity = match_entity(part) if entity is not None: new_tokens.append({'type': 'Entity', 'name': entity}) # Length of the entity plus 2--one for & at the beginning # and and one for ; at the end part = part[len(entity) + 2:] if part: new_tokens.append({'type': 'Characters', 'data': part}) continue new_tokens.append({'type': 'Characters', 'data': part}) return new_tokens def allow_token(self, token): """Handles the case where we're allowing the tag""" if 'data' in token: # Loop through all the attributes and drop the ones that are not # allowed, are unsafe or break other rules. Additionally, fix # attribute values that need fixing. # # At the end of this loop, we have the final set of attributes # we're keeping. attrs = {} for namespaced_name, val in token['data'].items(): namespace, name = namespaced_name # Drop attributes that are not explicitly allowed # # NOTE(willkg): We pass in the attribute name--not a namespaced # name. if not self.attr_filter(token['name'], name, val): continue # Look at attributes that have uri values if namespaced_name in self.attr_val_is_uri: val_unescaped = re.sub( "[`\000-\040\177-\240\s]+", '', unescape(val)).lower() # Remove replacement characters from unescaped characters. 
val_unescaped = val_unescaped.replace("\ufffd", "") # Drop attributes with uri values that have protocols that # aren't allowed if (re.match(r'^[a-z0-9][-+.a-z0-9]*:', val_unescaped) and (val_unescaped.split(':')[0] not in self.allowed_protocols)): continue # Drop values in svg attrs with non-local IRIs if namespaced_name in self.svg_attr_val_allows_ref: new_val = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', ' ', unescape(val)) new_val = new_val.strip() if not new_val: continue else: # Replace the val with the unescaped version because # it's a iri val = new_val # Drop href and xlink:href attr for svg elements with non-local IRIs if (None, token['name']) in self.svg_allow_local_href: if namespaced_name in [(None, 'href'), (namespaces['xlink'], 'href')]: if re.search(r'^\s*[^#\s]', val): continue # If it's a style attribute, sanitize it if namespaced_name == (None, u'style'): val = self.sanitize_css(val) # At this point, we want to keep the attribute, so add it in attrs[namespaced_name] = val token['data'] = alphabetize_attributes(attrs) return token def disallowed_token(self, token): token_type = token["type"] if token_type == "EndTag": token["data"] = "</%s>" % token["name"] elif token["data"]: assert token_type in ("StartTag", "EmptyTag") attrs = [] for (ns, name), v in token["data"].items(): attrs.append(' %s="%s"' % ( name if ns is None else "%s:%s" % (prefixes[ns], name), # NOTE(willkg): HTMLSerializer escapes attribute values # already, so if we do it here (like HTMLSerializer does), # then we end up double-escaping. 
v) ) token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) else: token["data"] = "<%s>" % token["name"] if token.get("selfClosing"): token["data"] = token["data"][:-1] + "/>" token["type"] = "Characters" del token["name"] return token def sanitize_css(self, style): """Sanitizes css in style tags""" # disallow urls style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) # gauntlet # Validate the css in the style tag and if it's not valid, then drop # the whole thing. parts = style.split(';') gauntlet = re.compile( r"""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$""" ) for part in parts: if not gauntlet.match(part): return '' if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return '' clean = [] for prop, value in re.findall('([-\w]+)\s*:\s*([^:;]*)', style): if not value: continue if prop.lower() in self.allowed_css_properties: clean.append(prop + ': ' + value + ';') elif prop.lower() in self.allowed_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) class BleachHTMLSerializer(HTMLSerializer): """Wraps the HTMLSerializer and undoes & -> &amp; in attributes""" def escape_base_amp(self, stoken): """Escapes bare & in HTML attribute values""" # First, undo what the HTMLSerializer did stoken = stoken.replace('&amp;', '&') # Then, escape any bare & for part in next_possible_entity(stoken): if not part: continue if part.startswith('&'): entity = match_entity(part) if entity is not None: yield '&' + entity + ';' # Length of the entity plus 2--one for & at the beginning # and and one for ; at the end part = part[len(entity) + 2:] if part: yield part continue yield part.replace('&', '&amp;') def serialize(self, treewalker, encoding=None): """Wrap HTMLSerializer.serialize and escape bare & in attributes""" in_tag = False after_equals = False for stoken in super(BleachHTMLSerializer, self).serialize(treewalker, encoding): if in_tag: if stoken == '>': in_tag = False elif after_equals: if stoken != 
'"': for part in self.escape_base_amp(stoken): yield part after_equals = False continue elif stoken == '=': after_equals = True yield stoken else: if stoken.startswith('<'): in_tag = True yield stoken
./CrossVul/dataset_final_sorted/CWE-20/py/bad_652_0
crossvul-python_data_good_2141_1
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import multiprocessing import signal import os import pwd import Queue import random import traceback import tempfile import time import collections import socket import base64 import sys import pipes import jinja2 import subprocess import getpass import ansible.constants as C import ansible.inventory from ansible import utils from ansible.utils import template from ansible.utils import check_conditional from ansible.utils import string_functions from ansible import errors from ansible import module_common import poller import connection from return_data import ReturnData from ansible.callbacks import DefaultRunnerCallbacks, vv from ansible.module_common import ModuleReplacer module_replacer = ModuleReplacer(strip_comments=False) HAS_ATFORK=True try: from Crypto.Random import atfork except ImportError: HAS_ATFORK=False multiprocessing_runner = None OUTPUT_LOCKFILE = tempfile.TemporaryFile() PROCESS_LOCKFILE = tempfile.TemporaryFile() ################################################ def _executor_hook(job_queue, result_queue, new_stdin): # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17 # this function also not present in CentOS 6 if HAS_ATFORK: atfork() signal.signal(signal.SIGINT, signal.SIG_IGN) while not job_queue.empty(): try: host 
= job_queue.get(block=False) return_data = multiprocessing_runner._executor(host, new_stdin) result_queue.put(return_data) except Queue.Empty: pass except: traceback.print_exc() class HostVars(dict): ''' A special view of vars_cache that adds values from the inventory when needed. ''' def __init__(self, vars_cache, inventory, vault_password=None): self.vars_cache = vars_cache self.inventory = inventory self.lookup = dict() self.update(vars_cache) self.vault_password = vault_password def __getitem__(self, host): if host not in self.lookup: result = self.inventory.get_variables(host, vault_password=self.vault_password).copy() result.update(self.vars_cache.get(host, {})) self.lookup[host] = result return self.lookup[host] class Runner(object): ''' core API interface to ansible ''' # see bin/ansible for how this is used... def __init__(self, host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage module_path=None, # ex: /usr/share/ansible module_name=C.DEFAULT_MODULE_NAME, # ex: copy module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b" forks=C.DEFAULT_FORKS, # parallelism level timeout=C.DEFAULT_TIMEOUT, # SSH timeout pattern=C.DEFAULT_PATTERN, # which hosts? 
ex: 'all', 'acme.example.org' remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username' remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key remote_port=None, # if SSH on different ports private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None background=0, # async poll every X seconds, else 0 for non-async basedir=None, # directory of playbook, if applicable setup_cache=None, # used to share fact data w/ other tasks vars_cache=None, # used to store variables about hosts transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local' conditional='True', # run only if this fact expression evals to true callbacks=None, # used for output sudo=False, # whether to run sudo or not sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root' module_vars=None, # a playbooks internals thing default_vars=None, # ditto is_playbook=False, # running from playbook or not? inventory=None, # reference to Inventory object subset=None, # subset pattern check=False, # don't make any changes, just try to probe for potential changes diff=False, # whether to show diffs for template files that change environment=None, # environment variables (as dict) to use inside the command complex_args=None, # structured data in addition to module_args, must be a dict error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False accelerate=False, # use accelerated connection accelerate_ipv6=False, # accelerated connection w/ IPv6 accelerate_port=None, # port to use with accelerated connection su=False, # Are we running our command via su? 
su_user=None, # User to su to when running command, ex: 'root' su_pass=C.DEFAULT_SU_PASS, vault_pass=None, run_hosts=None, # an optional list of pre-calculated hosts to run on no_log=False, # option to enable/disable logging for a given task ): # used to lock multiprocess inputs and outputs at various levels self.output_lockfile = OUTPUT_LOCKFILE self.process_lockfile = PROCESS_LOCKFILE if not complex_args: complex_args = {} # storage & defaults self.check = check self.diff = diff self.setup_cache = utils.default(setup_cache, lambda: collections.defaultdict(dict)) self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict)) self.basedir = utils.default(basedir, lambda: os.getcwd()) self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks()) self.generated_jid = str(random.randint(0, 999999999999)) self.transport = transport self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list)) self.module_vars = utils.default(module_vars, lambda: {}) self.default_vars = utils.default(default_vars, lambda: {}) self.always_run = None self.connector = connection.Connector(self) self.conditional = conditional self.module_name = module_name self.forks = int(forks) self.pattern = pattern self.module_args = module_args self.timeout = timeout self.remote_user = remote_user self.remote_pass = remote_pass self.remote_port = remote_port self.private_key_file = private_key_file self.background = background self.sudo = sudo self.sudo_user_var = sudo_user self.sudo_user = None self.sudo_pass = sudo_pass self.is_playbook = is_playbook self.environment = environment self.complex_args = complex_args self.error_on_undefined_vars = error_on_undefined_vars self.accelerate = accelerate self.accelerate_port = accelerate_port self.accelerate_ipv6 = accelerate_ipv6 self.callbacks.runner = self self.su = su self.su_user_var = su_user self.su_user = None self.su_pass = su_pass self.vault_pass = vault_pass self.no_log = no_log if 
self.transport == 'smart': # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko # 'smart' is the default since 1.2.1/1.3 cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = cmd.communicate() if "Bad configuration option" in err: self.transport = "paramiko" else: self.transport = "ssh" # save the original transport, in case it gets # changed later via options like accelerate self.original_transport = self.transport # misc housekeeping if subset and self.inventory._subset is None: # don't override subset when passed from playbook self.inventory.subset(subset) # If we get a pre-built list of hosts to run on, from say a playbook, use them. # Also where we will store the hosts to run on once discovered self.run_hosts = run_hosts if self.transport == 'local': self.remote_user = pwd.getpwuid(os.geteuid())[0] if module_path is not None: for i in module_path.split(os.pathsep): utils.plugins.module_finder.add_directory(i) utils.plugins.push_basedir(self.basedir) # ensure we are using unique tmp paths random.seed() # ***************************************************** def _complex_args_hack(self, complex_args, module_args): """ ansible-playbook both allows specifying key=value string arguments and complex arguments however not all modules use our python common module system and cannot access these. An example might be a Bash module. This hack allows users to still pass "args" as a hash of simple scalars to those arguments and is short term. We could technically just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented it does mean values in 'args' have LOWER priority than those on the key=value line, allowing args to provide yet another way to have pluggable defaults. 
""" if complex_args is None: return module_args if not isinstance(complex_args, dict): raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args) for (k,v) in complex_args.iteritems(): if isinstance(v, basestring): module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) return module_args # ***************************************************** def _transfer_str(self, conn, tmp, name, data): ''' transfer string to remote file ''' if type(data) == dict: data = utils.jsonify(data) afd, afile = tempfile.mkstemp() afo = os.fdopen(afd, 'w') try: if not isinstance(data, unicode): #ensure the data is valid UTF-8 data.decode('utf-8') else: data = data.encode('utf-8') afo.write(data) except: raise errors.AnsibleError("failure encoding into utf-8") afo.flush() afo.close() remote = conn.shell.join_path(tmp, name) try: conn.put_file(afile, remote) finally: os.unlink(afile) return remote # ***************************************************** def _compute_environment_string(self, conn, inject=None): ''' what environment variables to use when running the command? 
''' enviro = {} if self.environment: enviro = template.template(self.basedir, self.environment, inject, convert_bare=True) enviro = utils.safe_eval(enviro) if type(enviro) != dict: raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro) return conn.shell.env_prefix(**enviro) # ***************************************************** def _compute_delegate(self, host, password, remote_inject): """ Build a dictionary of all attributes for the delegate host """ delegate = {} # allow delegated host to be templated delegate['host'] = template.template(self.basedir, host, remote_inject, fail_on_undefined=True) delegate['inject'] = remote_inject.copy() # set any interpreters interpreters = [] for i in delegate['inject']: if i.startswith("ansible_") and i.endswith("_interpreter"): interpreters.append(i) for i in interpreters: del delegate['inject'][i] port = C.DEFAULT_REMOTE_PORT this_host = delegate['host'] # get the vars for the delegate by it's name try: this_info = delegate['inject']['hostvars'][this_host] except: # make sure the inject is empty for non-inventory hosts this_info = {} # get the real ssh_address for the delegate # and allow ansible_ssh_host to be templated delegate['ssh_host'] = template.template(self.basedir, this_info.get('ansible_ssh_host', this_host), this_info, fail_on_undefined=True) delegate['port'] = this_info.get('ansible_ssh_port', port) delegate['user'] = self._compute_delegate_user(this_host, delegate['inject']) delegate['pass'] = this_info.get('ansible_ssh_pass', password) delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file) delegate['transport'] = this_info.get('ansible_connection', self.transport) delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass) # Last chance to get private_key_file from global variables. 
# this is usefull if delegated host is not defined in the inventory if delegate['private_key_file'] is None: delegate['private_key_file'] = remote_inject.get( 'ansible_ssh_private_key_file', None) if delegate['private_key_file'] is not None: delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file']) for i in this_info: if i.startswith("ansible_") and i.endswith("_interpreter"): delegate['inject'][i] = this_info[i] return delegate def _compute_delegate_user(self, host, inject): """ Caculate the remote user based on an order of preference """ # inventory > playbook > original_host actual_user = inject.get('ansible_ssh_user', self.remote_user) thisuser = None if host in inject['hostvars']: if inject['hostvars'][host].get('ansible_ssh_user'): # user for delegate host in inventory thisuser = inject['hostvars'][host].get('ansible_ssh_user') if thisuser is None and self.remote_user: # user defined by play/runner thisuser = self.remote_user if thisuser is not None: actual_user = thisuser else: # fallback to the inventory user of the play host #actual_user = inject.get('ansible_ssh_user', actual_user) actual_user = inject.get('ansible_ssh_user', self.remote_user) return actual_user # ***************************************************** def _execute_module(self, conn, tmp, module_name, args, async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True): ''' transfer and run a module along with its arguments on the remote side''' # hack to support fireball mode if module_name == 'fireball': args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host)))) if 'port' not in args: args += " port=%s" % C.ZEROMQ_PORT ( module_style, shebang, module_data ) = self._configure_module(conn, module_name, args, inject, complex_args) # a remote tmp path may be necessary and not already created if self._late_needs_tmp_path(conn, tmp, module_style): tmp = 
self._make_tmp_path(conn) remote_module_path = conn.shell.join_path(tmp, module_name) if (module_style != 'new' or async_jid is not None or not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su): self._transfer_str(conn, tmp, module_name, module_data) environment_string = self._compute_environment_string(conn, inject) if "tmp" in tmp and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): # deal with possible umask issues once sudo'ed to other user self._remote_chmod(conn, 'a+r', remote_module_path, tmp) cmd = "" in_data = None if module_style != 'new': if 'CHECKMODE=True' in args: # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to # do --check mode, so to be safe we will not run it. return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules")) elif 'NO_LOG' in args: return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules")) args = template.template(self.basedir, args, inject) # decide whether we need to transfer JSON or key=value argsfile = None if module_style == 'non_native_want_json': if complex_args: complex_args.update(utils.parse_kv(args)) argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args)) else: argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args))) else: argsfile = self._transfer_str(conn, tmp, 'arguments', args) if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): # deal with possible umask issues once sudo'ed to other user self._remote_chmod(conn, 'a+r', argsfile, tmp) if async_jid is None: cmd = "%s %s" % (remote_module_path, argsfile) else: cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]]) else: if async_jid is None: if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not 
C.DEFAULT_KEEP_REMOTE_FILES and not self.su: in_data = module_data else: cmd = "%s" % (remote_module_path) else: cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]]) if not shebang: raise errors.AnsibleError("module is missing interpreter line") rm_tmp = None if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp) cmd = cmd.strip() sudoable = True if module_name == "accelerate": # always run the accelerate module as the user # specified in the play, not the sudo_user sudoable = False if self.su: res = self._low_level_exec_command(conn, cmd, tmp, su=True, in_data=in_data) else: res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data) if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step cmd2 = conn.shell.remove(tmp, recurse=True) self._low_level_exec_command(conn, cmd2, tmp, sudoable=False) data = utils.parse_json(res['stdout'], from_remote=True) if 'parsed' in data and data['parsed'] == False: data['msg'] += res['stderr'] return ReturnData(conn=conn, result=data) # ***************************************************** def _executor(self, host, new_stdin): ''' handler for multiprocessing library ''' try: fileno = sys.stdin.fileno() except ValueError: fileno = None try: self._new_stdin = new_stdin if not new_stdin and fileno is not None: try: self._new_stdin = os.fdopen(os.dup(fileno)) except OSError, e: # couldn't dupe stdin, most likely because it's # not 
a valid file descriptor, so we just rely on # using the one that was passed in pass exec_rc = self._executor_internal(host, new_stdin) if type(exec_rc) != ReturnData: raise Exception("unexpected return type: %s" % type(exec_rc)) # redundant, right? if not exec_rc.comm_ok: self.callbacks.on_unreachable(host, exec_rc.result) return exec_rc except errors.AnsibleError, ae: msg = str(ae) self.callbacks.on_unreachable(host, msg) return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg)) except Exception: msg = traceback.format_exc() self.callbacks.on_unreachable(host, msg) return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg)) # ***************************************************** def _executor_internal(self, host, new_stdin): ''' executes any module one or more times ''' host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) host_connection = host_variables.get('ansible_connection', self.transport) if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: port = host_variables.get('ansible_ssh_port', self.remote_port) if port is None: port = C.DEFAULT_REMOTE_PORT else: # fireball, local, etc port = self.remote_port # merge the VARS and SETUP caches for this host combined_cache = self.setup_cache.copy() combined_cache.setdefault(host, {}).update(self.vars_cache.get(host, {})) hostvars = HostVars(combined_cache, self.inventory, vault_password=self.vault_pass) # use combined_cache and host_variables to template the module_vars # we update the inject variables with the data we're about to template # since some of the variables we'll be replacing may be contained there too module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {})) module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject) module_vars = template.template(self.basedir, self.module_vars, module_vars_inject) inject = {} inject = utils.combine_vars(inject, self.default_vars) inject = 
utils.combine_vars(inject, host_variables) inject = utils.combine_vars(inject, module_vars) inject = utils.combine_vars(inject, combined_cache.get(host, {})) inject.setdefault('ansible_ssh_user', self.remote_user) inject['hostvars'] = hostvars inject['group_names'] = host_variables.get('group_names', []) inject['groups'] = self.inventory.groups_list() inject['vars'] = self.module_vars inject['defaults'] = self.default_vars inject['environment'] = self.environment inject['playbook_dir'] = self.basedir if self.inventory.basedir() is not None: inject['inventory_dir'] = self.inventory.basedir() if self.inventory.src() is not None: inject['inventory_file'] = self.inventory.src() # allow with_foo to work in playbooks... items = None items_plugin = self.module_vars.get('items_lookup_plugin', None) if items_plugin is not None and items_plugin in utils.plugins.lookup_loader: basedir = self.basedir if '_original_file' in inject: basedir = os.path.dirname(inject['_original_file']) filesdir = os.path.join(basedir, '..', 'files') if os.path.exists(filesdir): basedir = filesdir items_terms = self.module_vars.get('items_lookup_terms', '') items_terms = template.template(basedir, items_terms, inject) items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) if type(items) != list: raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng' ]: # hack for apt, yum, and pkgng so that with_items maps back into a single module call use_these_items = [] for x in items: inject['item'] = x if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): use_these_items.append(x) inject['item'] = ",".join(use_these_items) items = None # logic to replace complex args if possible complex_args = self.complex_args # logic to decide how to run things 
depends on whether with_items is used if items is None: if isinstance(complex_args, basestring): complex_args = template.template(self.basedir, complex_args, inject, convert_bare=True) complex_args = utils.safe_eval(complex_args) if type(complex_args) != dict: raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args) elif len(items) > 0: # executing using with_items, so make multiple calls # TODO: refactor if self.background > 0: raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks") all_comm_ok = True all_changed = False all_failed = False results = [] for x in items: # use a fresh inject for each item this_inject = inject.copy() this_inject['item'] = x # TODO: this idiom should be replaced with an up-conversion to a Jinja2 template evaluation if isinstance(self.complex_args, basestring): complex_args = template.template(self.basedir, self.complex_args, this_inject, convert_bare=True) complex_args = utils.safe_eval(complex_args) if type(complex_args) != dict: raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) result = self._executor_internal_inner( host, self.module_name, self.module_args, this_inject, port, complex_args=complex_args ) results.append(result.result) if result.comm_ok == False: all_comm_ok = False all_failed = True break for x in results: if x.get('changed') == True: all_changed = True if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]: all_failed = True break msg = 'All items completed' if all_failed: msg = "One or more items failed." 
rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg) if not all_failed: del rd_result['failed'] return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result) else: self.callbacks.on_skipped(host, None) return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True)) # ***************************************************** def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None): ''' decides how to invoke a module ''' # late processing of parameterized sudo_user (with_items,..) if self.sudo_user_var is not None: self.sudo_user = template.template(self.basedir, self.sudo_user_var, inject) if self.su_user_var is not None: self.su_user = template.template(self.basedir, self.su_user_var, inject) # allow module args to work as a dictionary # though it is usually a string new_args = "" if type(module_args) == dict: for (k,v) in module_args.iteritems(): new_args = new_args + "%s='%s' " % (k,v) module_args = new_args # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }}) module_name = template.template(self.basedir, module_name, inject) if module_name in utils.plugins.action_loader: if self.background != 0: raise errors.AnsibleError("async mode is not supported with the %s module" % module_name) handler = utils.plugins.action_loader.get(module_name, self) elif self.background == 0: handler = utils.plugins.action_loader.get('normal', self) else: handler = utils.plugins.action_loader.get('async', self) if type(self.conditional) != list: self.conditional = [ self.conditional ] for cond in self.conditional: if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): result = utils.jsonify(dict(changed=False, skipped=True)) self.callbacks.on_skipped(host, inject.get('item',None)) return ReturnData(host=host, result=result) if getattr(handler, 'setup', None) is not None: handler.setup(module_name, 
inject) conn = None actual_host = inject.get('ansible_ssh_host', host) # allow ansible_ssh_host to be templated actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True) actual_port = port actual_user = inject.get('ansible_ssh_user', self.remote_user) actual_pass = inject.get('ansible_ssh_pass', self.remote_pass) actual_transport = inject.get('ansible_connection', self.transport) actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file) actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True) self.sudo = utils.boolean(inject.get('ansible_sudo', self.sudo)) self.sudo_user = inject.get('ansible_sudo_user', self.sudo_user) self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass) self.su = inject.get('ansible_su', self.su) self.su_pass = inject.get('ansible_su_pass', self.su_pass) # select default root user in case self.sudo requested # but no user specified; happens e.g. 
in host vars when # just ansible_sudo=True is specified if self.sudo and self.sudo_user is None: self.sudo_user = 'root' if actual_private_key_file is not None: actual_private_key_file = os.path.expanduser(actual_private_key_file) if self.accelerate and actual_transport != 'local': #Fix to get the inventory name of the host to accelerate plugin if inject.get('ansible_ssh_host', None): self.accelerate_inventory_host = host else: self.accelerate_inventory_host = None # if we're using accelerated mode, force the # transport to accelerate actual_transport = "accelerate" if not self.accelerate_port: self.accelerate_port = C.ACCELERATE_PORT actual_port = inject.get('ansible_ssh_port', port) # the delegated host may have different SSH port configured, etc # and we need to transfer those, and only those, variables delegate_to = inject.get('delegate_to', None) if delegate_to is not None: delegate = self._compute_delegate(delegate_to, actual_pass, inject) actual_transport = delegate['transport'] actual_host = delegate['ssh_host'] actual_port = delegate['port'] actual_user = delegate['user'] actual_pass = delegate['pass'] actual_private_key_file = delegate['private_key_file'] self.sudo_pass = delegate['sudo_pass'] inject = delegate['inject'] # user/pass may still contain variables at this stage actual_user = template.template(self.basedir, actual_user, inject) actual_pass = template.template(self.basedir, actual_pass, inject) self.sudo_pass = template.template(self.basedir, self.sudo_pass, inject) # make actual_user available as __magic__ ansible_ssh_user variable inject['ansible_ssh_user'] = actual_user try: if actual_transport == 'accelerate': # for accelerate, we stuff both ports into a single # variable so that we don't have to mangle other function # calls just to accomodate this one case actual_port = [actual_port, self.accelerate_port] elif actual_port is not None: actual_port = int(template.template(self.basedir, actual_port, inject)) except ValueError, e: result = 
dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port) return ReturnData(host=host, comm_ok=False, result=result) try: conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file) if delegate_to or host != actual_host: conn.delegate = host default_shell = getattr(conn, 'default_shell', '') shell_type = inject.get('ansible_shell_type') if not shell_type: if default_shell: shell_type = default_shell else: shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) shell_plugin = utils.plugins.shell_loader.get(shell_type) if shell_plugin is None: shell_plugin = utils.plugins.shell_loader.get('sh') conn.shell = shell_plugin except errors.AnsibleConnectionFailed, e: result = dict(failed=True, msg="FAILED: %s" % str(e)) return ReturnData(host=host, comm_ok=False, result=result) tmp = '' # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir if self._early_needs_tmp_path(module_name, handler): tmp = self._make_tmp_path(conn) # render module_args and complex_args templates try: module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars) complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars) except jinja2.exceptions.UndefinedError, e: raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e)) result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) # Code for do until feature until = self.module_vars.get('until', None) if until is not None and result.comm_ok: inject[self.module_vars.get('register')] = result.result cond = template.template(self.basedir, until, inject, expand_lists=False) if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): retries = self.module_vars.get('retries') delay = 
self.module_vars.get('delay') for x in range(1, int(retries) + 1): # template the delay, cast to float and sleep delay = template.template(self.basedir, delay, inject, expand_lists=False) delay = float(delay) time.sleep(delay) tmp = '' if self._early_needs_tmp_path(module_name, handler): tmp = self._make_tmp_path(conn) result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) result.result['attempts'] = x vv("Result from run %i is: %s" % (x, result.result)) inject[self.module_vars.get('register')] = result.result cond = template.template(self.basedir, until, inject, expand_lists=False) if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): break if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): result.result['failed'] = True result.result['msg'] = "Task failed as maximum retries was encountered" else: result.result['attempts'] = 0 conn.close() if not result.comm_ok: # connection or parsing errors... 
self.callbacks.on_unreachable(host, result.result) else: data = result.result # https://github.com/ansible/ansible/issues/4958 if hasattr(sys.stdout, "isatty"): if "stdout" in data and sys.stdout.isatty(): if not string_functions.isprintable(data['stdout']): data['stdout'] = '' if 'item' in inject: result.result['item'] = inject['item'] result.result['invocation'] = dict( module_args=module_args, module_name=module_name ) changed_when = self.module_vars.get('changed_when') failed_when = self.module_vars.get('failed_when') if (changed_when is not None or failed_when is not None) and self.background == 0: register = self.module_vars.get('register') if register is not None: if 'stdout' in data: data['stdout_lines'] = data['stdout'].splitlines() inject[register] = data # only run the final checks if the async_status has finished, # or if we're not running an async_status check at all if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status': if changed_when is not None and 'skipped' not in data: data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) if failed_when is not None and 'skipped' not in data: data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) if is_chained: # no callbacks return result if 'skipped' in data: self.callbacks.on_skipped(host, inject.get('item',None)) elif not result.is_successful(): ignore_errors = self.module_vars.get('ignore_errors', False) self.callbacks.on_failed(host, data, ignore_errors) else: if self.diff: self.callbacks.on_file_diff(conn.host, result.diff) self.callbacks.on_ok(host, data) return result def _early_needs_tmp_path(self, module_name, handler): ''' detect if a tmp path should be created before the handler is called ''' if module_name in utils.plugins.action_loader: return getattr(handler, 'TRANSFERS_FILES', False) # other 
modules never need tmp path at early stage return False def _late_needs_tmp_path(self, conn, tmp, module_style): if "tmp" in tmp: # tmp has already been created return False if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su: # tmp is necessary to store module source code return True if not conn.has_pipelining: # tmp is necessary to store the module source code # or we want to keep the files on the target system return True if module_style != "new": # even when conn has pipelining, old style modules need tmp to store arguments return True return False # ***************************************************** def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, executable=None, su=False, in_data=None): ''' execute a command string over SSH, return the output ''' if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) return dict(stdout='', stderr='') if executable is None: executable = C.DEFAULT_EXECUTABLE sudo_user = self.sudo_user su_user = self.su_user # compare connection user to (su|sudo)_user and disable if the same # assume connection type is local if no user attribute this_user = getattr(conn, 'user', getpass.getuser()) if (not su and this_user == sudo_user) or (su and this_user == su_user): sudoable = False su = False if su: rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, su=su, su_user=su_user, executable=executable, in_data=in_data) else: rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable, executable=executable, in_data=in_data) if type(stdout) not in [ str, unicode ]: out = ''.join(stdout.readlines()) else: out = stdout if type(stderr) not in [ str, unicode ]: err = ''.join(stderr.readlines()) else: err = stderr if rc is not None: return dict(rc=rc, stdout=out, stderr=err) else: return dict(stdout=out, stderr=err) # ***************************************************** def _remote_chmod(self, conn, 
mode, path, tmp, sudoable=False, su=False): ''' issue a remote chmod command ''' cmd = conn.shell.chmod(mode, path) return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, su=su) # ***************************************************** def _remote_md5(self, conn, tmp, path): ''' takes a remote md5sum without requiring python, and returns 1 if no file ''' cmd = conn.shell.md5(path) data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) data2 = utils.last_non_blank_line(data['stdout']) try: if data2 == '': # this may happen if the connection to the remote server # failed, so just return "INVALIDMD5SUM" to avoid errors return "INVALIDMD5SUM" else: return data2.split()[0] except IndexError: sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n") sys.stderr.write("command: %s\n" % md5s) sys.stderr.write("----\n") sys.stderr.write("output: %s\n" % data) sys.stderr.write("----\n") # this will signal that it changed and allow things to keep going return "INVALIDMD5SUM" # ***************************************************** def _make_tmp_path(self, conn): ''' make and return a temporary path on a remote box ''' basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) use_system_tmp = False if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): use_system_tmp = True tmp_mode = None if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): tmp_mode = 'a+rx' cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode) result = self._low_level_exec_command(conn, cmd, None, sudoable=False) # error handling on this seems a little aggressive? if result['rc'] != 0: if result['rc'] == 5: output = 'Authentication failure.' elif result['rc'] == 255 and self.transport in ['ssh']: if utils.VERBOSITY > 3: output = 'SSH encountered an unknown error. 
The output was:\n%s' % (result['stdout']+result['stderr']) else: output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue' else: output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc']) if 'stdout' in result and result['stdout'] != '': output = output + ": %s" % result['stdout'] raise errors.AnsibleError(output) rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '') # Catch failure conditions, files should never be # written to locations in /. if rc == '/': raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd)) return rc # ***************************************************** def _remove_tmp_path(self, conn, tmp_path): ''' Remove a tmp_path. ''' if "-tmp-" in tmp_path: cmd = conn.shell.remove(tmp_path, recurse=True) self._low_level_exec_command(conn, cmd, None, sudoable=False) # If we have gotten here we have a working ssh configuration. # If ssh breaks we could leave tmp directories out on the remote system. 
# ***************************************************** def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None): ''' transfer a module over SFTP, does not run it ''' ( module_style, module_shebang, module_data ) = self._configure_module(conn, module_name, module_args, inject, complex_args) module_remote_path = conn.shell.join_path(tmp, module_name) self._transfer_str(conn, tmp, module_name, module_data) return (module_remote_path, module_style, module_shebang) # ***************************************************** def _configure_module(self, conn, module_name, module_args, inject, complex_args=None): ''' find module and configure it ''' # Search module path(s) for named module. module_suffixes = getattr(conn, 'default_suffixes', None) module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes) if module_path is None: raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths())) # insert shared code and arguments into the module (module_data, module_style, module_shebang) = module_replacer.modify_module( module_path, complex_args, module_args, inject ) return (module_style, module_shebang, module_data) # ***************************************************** def _parallel_exec(self, hosts): ''' handles mulitprocessing when more than 1 fork is required ''' manager = multiprocessing.Manager() job_queue = manager.Queue() for host in hosts: job_queue.put(host) result_queue = manager.Queue() try: fileno = sys.stdin.fileno() except ValueError: fileno = None workers = [] for i in range(self.forks): new_stdin = None if fileno is not None: try: new_stdin = os.fdopen(os.dup(fileno)) except OSError, e: # couldn't dupe stdin, most likely because it's # not a valid file descriptor, so we just rely on # using the one that was passed in pass prc = multiprocessing.Process(target=_executor_hook, args=(job_queue, result_queue, new_stdin)) prc.start() workers.append(prc) 
try: for worker in workers: worker.join() except KeyboardInterrupt: for worker in workers: worker.terminate() worker.join() results = [] try: while not result_queue.empty(): results.append(result_queue.get(block=False)) except socket.error: raise errors.AnsibleError("<interrupted>") return results # ***************************************************** def _partition_results(self, results): ''' separate results by ones we contacted & ones we didn't ''' if results is None: return None results2 = dict(contacted={}, dark={}) for result in results: host = result.host if host is None: raise Exception("internal error, host not set") if result.communicated_ok(): results2["contacted"][host] = result.result else: results2["dark"][host] = result.result # hosts which were contacted but never got a chance to return for host in self.run_hosts: if not (host in results2['dark'] or host in results2['contacted']): results2["dark"][host] = {} return results2 # ***************************************************** def run(self): ''' xfer & run module on all matched hosts ''' # find hosts that match the pattern if not self.run_hosts: self.run_hosts = self.inventory.list_hosts(self.pattern) hosts = self.run_hosts if len(hosts) == 0: self.callbacks.on_no_hosts() return dict(contacted={}, dark={}) global multiprocessing_runner multiprocessing_runner = self results = None # Check if this is an action plugin. Some of them are designed # to be ran once per group of hosts. Example module: pause, # run once per hostgroup, rather than pausing once per each # host. p = utils.plugins.action_loader.get(self.module_name, self) if self.forks == 0 or self.forks > len(hosts): self.forks = len(hosts) if p and getattr(p, 'BYPASS_HOST_LOOP', None): # Expose the current hostgroup to the bypassing plugins self.host_set = hosts # We aren't iterating over all the hosts in this # group. So, just pick the first host in our group to # construct the conn object with. 
result_data = self._executor(hosts[0], None).result # Create a ResultData item for each host in this group # using the returned result. If we didn't do this we would # get false reports of dark hosts. results = [ ReturnData(host=h, result=result_data, comm_ok=True) \ for h in hosts ] del self.host_set elif self.forks > 1: try: results = self._parallel_exec(hosts) except IOError, ie: print ie.errno if ie.errno == 32: # broken pipe from Ctrl+C raise errors.AnsibleError("interrupted") raise else: results = [ self._executor(h, None) for h in hosts ] return self._partition_results(results) # ***************************************************** def run_async(self, time_limit): ''' Run this module asynchronously and return a poller. ''' self.background = time_limit results = self.run() return results, poller.AsyncPoller(results, self) # ***************************************************** def noop_on_check(self, inject): ''' Should the runner run in check mode or not ? ''' # initialize self.always_run on first call if self.always_run is None: self.always_run = self.module_vars.get('always_run', False) self.always_run = check_conditional( self.always_run, self.basedir, inject, fail_on_undefined=True) return (self.check and not self.always_run)
./CrossVul/dataset_final_sorted/CWE-20/py/good_2141_1
crossvul-python_data_good_1232_0
### # Copyright (c) 2019, Valentin Lorentz # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
###

import ast
import math
import cmath
import operator


class InvalidNode(Exception):
    """Raised when a parsed expression contains a construct that is not
    allowed in a calculator expression (attributes, subscripts, strings,
    unsupported operators, ...)."""
    pass


def filter_module(module, safe_names):
    """Return a dict mapping each name in ``safe_names`` that actually
    exists on ``module`` to its value.

    The ``hasattr`` guard lets the same name lists work across Python
    versions (e.g. ``math.tau`` or ``math.remainder`` may be absent).
    """
    return dict([
        (name, getattr(module, name))
        for name in safe_names
        if hasattr(module, name)
        ])

# Operator dispatch tables: AST operator node class -> implementation.
UNARY_OPS = {
    ast.UAdd: lambda x: x,
    ast.USub: lambda x: -x,
}
BIN_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.Pow: operator.pow,
    ast.BitXor: operator.xor,
    ast.BitOr: operator.or_,
    ast.BitAnd: operator.and_,
}

MATH_CONSTANTS = 'e inf nan pi tau'.split()
SAFE_MATH_FUNCTIONS = (
    'acos acosh asin asinh atan atan2 atanh copysign cos cosh degrees erf '
    'erfc exp expm1 fabs fmod frexp fsum gamma hypot ldexp lgamma log log10 '
    'log1p log2 modf pow radians remainder sin sinh tan tanh'
    ).split()
SAFE_CMATH_FUNCTIONS = (
    'acos acosh asin asinh atan atanh cos cosh exp inf infj log log10 '
    'nanj phase polar rect sin sinh tan tanh tau'
    ).split()

SAFE_ENV = filter_module(math, MATH_CONSTANTS + SAFE_MATH_FUNCTIONS)
# cmath versions deliberately override the math ones for overlapping names,
# so the trig/log functions also accept complex arguments.
SAFE_ENV.update(filter_module(cmath, SAFE_CMATH_FUNCTIONS))

def _sqrt(x):
    # Fall back to cmath for negative/complex input instead of raising.
    if isinstance(x, complex) or x < 0:
        return cmath.sqrt(x)
    else:
        return math.sqrt(x)

def _cbrt(x):
    return math.pow(x, 1.0/3)

def _factorial(x):
    # Cap the argument so a hostile expression can't burn CPU/memory.
    if x<=10000:
        return float(math.factorial(x))
    else:
        raise Exception('factorial argument too large')

SAFE_ENV.update({
    'i': 1j,
    'abs': abs,
    'max': max,
    'min': min,
    'round': lambda x, y=0: round(x, int(y)),
    'factorial': _factorial,
    'sqrt': _sqrt,
    'cbrt': _cbrt,
    'ceil': lambda x: float(math.ceil(x)),
    'floor': lambda x: float(math.floor(x)),
})

UNSAFE_ENV = SAFE_ENV.copy()

# Add functions that return integers (only available when allow_ints=True).
UNSAFE_ENV.update(filter_module(math, 'ceil floor factorial gcd'.split()))

# It would be nice if ast.literal_eval used a visitor so we could subclass
# to extend it, but it doesn't, so let's reimplement it entirely.
class SafeEvalVisitor(ast.NodeVisitor):
    """AST walker that evaluates a whitelisted arithmetic subset of Python.

    Any node type without an explicit ``visit_*`` method falls through to
    :meth:`generic_visit`, which raises :class:`InvalidNode` — this is the
    security boundary.
    """

    def __init__(self, allow_ints):
        self._allow_ints = allow_ints
        self._env = UNSAFE_ENV if allow_ints else SAFE_ENV

    def _convert_num(self, x):
        """Converts numbers to complex if ints are not allowed."""
        if self._allow_ints:
            return x
        else:
            x = complex(x)
            if x.imag == 0:
                x = x.real
                # Need to use string-formatting here instead of str() because
                # use of str() on large numbers loses information:
                # str(float(33333333333333)) => '3.33333333333e+13'
                # float('3.33333333333e+13') => 33333333333300.0
                return float('%.16f' % x)
            else:
                return x

    def visit_Expression(self, node):
        return self.visit(node.body)

    def visit_Constant(self, node):
        # Python 3.8+ parses literals as ast.Constant; handling it here
        # avoids relying on NodeVisitor's deprecated ast.Num compatibility
        # shim (ast.Num is slated for removal). Only plain numbers are
        # legal — bools, strings, None, Ellipsis are rejected, matching
        # the behaviour the old visit_Num-only dispatch had.
        value = node.value
        if isinstance(value, bool) or not isinstance(value, (int, float, complex)):
            raise InvalidNode('illegal constant %r' % (value,))
        return self._convert_num(value)

    def visit_Num(self, node):
        # Retained for interpreters older than 3.8 that still emit ast.Num.
        return self._convert_num(node.n)

    def visit_Name(self, node):
        # Lookups are case-insensitive against the whitelist env.
        id_ = node.id.lower()
        if id_ in self._env:
            return self._env[id_]
        else:
            raise NameError(node.id)

    def visit_Call(self, node):
        func = self.visit(node.func)
        args = map(self.visit, node.args)
        # TODO: keywords?
        return func(*args)

    def visit_UnaryOp(self, node):
        op = UNARY_OPS.get(node.op.__class__)
        if op:
            return op(self.visit(node.operand))
        else:
            raise InvalidNode('illegal operator %s' % node.op.__class__.__name__)

    def visit_BinOp(self, node):
        op = BIN_OPS.get(node.op.__class__)
        if op:
            return op(self.visit(node.left), self.visit(node.right))
        else:
            raise InvalidNode('illegal operator %s' % node.op.__class__.__name__)

    def generic_visit(self, node):
        raise InvalidNode('illegal construct %s' % node.__class__.__name__)

def safe_eval(text, allow_ints):
    """Parse ``text`` as an expression and evaluate it with the whitelist
    visitor. Raises InvalidNode/NameError/SyntaxError on anything outside
    the allowed calculator subset."""
    node = ast.parse(text, mode='eval')
    return SafeEvalVisitor(allow_ints).visit(node)
./CrossVul/dataset_final_sorted/CWE-20/py/good_1232_0
crossvul-python_data_good_3766_1
from __future__ import with_statement
import os
import re
import urllib

from django.conf import settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.test import TestCase
from django.test.utils import override_settings

from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
                SetPasswordForm, PasswordResetForm)


class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def setUp(self):
        # Pin language settings so rendered messages are predictable,
        # saving the originals for tearDown.
        self.old_LANGUAGES = settings.LANGUAGES
        self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
        settings.LANGUAGES = (('en', 'English'),)
        settings.LANGUAGE_CODE = 'en'
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )

    def tearDown(self):
        settings.LANGUAGES = self.old_LANGUAGES
        settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS

    def login(self, password='password'):
        # Log the fixture user in and assert the session was established.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
        self.assertTrue(SESSION_KEY in self.client.session)

    def assertContainsEscaped(self, response, text, **kwargs):
        # Form error messages are HTML-escaped in templates; compare escaped.
        return self.assertContains(response, escape(force_unicode(text)), **kwargs)

AuthViewsTestCase = override_settings(USE_TZ=False)(AuthViewsTestCase)


class AuthViewNamedURLTests(AuthViewsTestCase):
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb36': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)


class PasswordResetTest(AuthViewsTestCase):

    def test_email_not_found(self):
        "Error is raised if the provided email address isn't currently registered"
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown'])
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with self.assertRaises(SuspiciousOperation):
            self.client.post('/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
        self.assertEqual(len(mail.outbox), 0)

    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with self.assertRaises(SuspiciousOperation):
            self.client.post('/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
        self.assertEqual(len(mail.outbox), 0)

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract (full URL, path) of the confirmation link from the email body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Please enter your new password" in response.content)

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]

        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456-1-1/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]

        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # It redirects us to a 'complete' page:
        self.assertEqual(response.status_code, 302)
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))

        # Check we can't use the link again
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])


class ChangePasswordTest(AuthViewsTestCase):

    def fail_login(self, password='password'):
        # Attempt a login that is expected to be rejected.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login'])

    def logout(self):
        response = self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))

    def test_password_change_done_fails(self):
        with self.settings(LOGIN_URL='/login/'):
            response = self.client.get('/password_change/done/')
            self.assertEqual(response.status_code, 302)
            self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/'))


class LoginTest(AuthViewsTestCase):

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('django.contrib.auth.views.login'))
        self.assertEqual(response.status_code, 200)
        if Site._meta.installed:
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                     'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        # The login view must not redirect to off-site `next` targets.
        login_url = reverse('django.contrib.auth.views.login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):

            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urllib.quote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urllib.quote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)


class LoginURLSettings(AuthViewsTestCase):

    def setUp(self):
        super(LoginURLSettings, self).setUp()
        self.old_LOGIN_URL = settings.LOGIN_URL

    def tearDown(self):
        super(LoginURLSettings, self).tearDown()
        settings.LOGIN_URL = self.old_LOGIN_URL

    def get_login_required_url(self, login_url):
        # Hit a login_required view and return the redirect target produced
        # for the given LOGIN_URL setting.
        settings.LOGIN_URL = login_url
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        return response['Location']

    def test_standard_login_url(self):
        login_url = '/login/'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = '/login_required/'
        self.assertEqual(login_required_url, 'http://testserver%s?%s' %
                         (login_url, querystring.urlencode('/')))

    def test_remote_login_url(self):
        login_url = 'http://remote.example.com/login'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url, '%s?%s' % (login_url,
                                                        querystring.urlencode('/')))

    def test_https_login_url(self):
        login_url = 'https:///login/'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url, '%s?%s' % (login_url,
                                                        querystring.urlencode('/')))

    def test_login_url_with_querystring(self):
        login_url = '/login/?pretty=1'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('pretty=1', mutable=True)
        querystring['next'] = '/login_required/'
        self.assertEqual(login_required_url, 'http://testserver/login/?%s' %
                         querystring.urlencode('/'))

    def test_remote_login_url_with_next_querystring(self):
        login_url = 'http://remote.example.com/login/'
        login_required_url = self.get_login_required_url('%s?next=/default/' %
                                                         login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url, '%s?%s' % (login_url,
                                                        querystring.urlencode('/')))


class LogoutTest(AuthViewsTestCase):

    def confirm_logged_out(self):
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertEqual(200, response.status_code)
        self.assertTrue('Logged out' in response.content)
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))

        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))

        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        # The logout view must not redirect to off-site `next` targets.
        logout_url = reverse('django.contrib.auth.views.logout')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urllib.quote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urllib.quote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()
./CrossVul/dataset_final_sorted/CWE-20/py/good_3766_1
crossvul-python_data_bad_3768_2
import urlparse

from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect

# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site


def _is_safe_redirect_url(url, host):
    """
    Return True if ``url`` is safe to redirect a user to, i.e. it does not
    point -- explicitly or implicitly -- at a host other than ``host``.

    A bare "netloc differs from host" test is insufficient: browsers treat
    backslashes as forward slashes and collapse extra leading slashes, so
    URLs such as ``/\\evil.com`` or ``///evil.com`` parse with an *empty*
    netloc in urlparse but still navigate cross-origin in the browser.
    Those forms are rejected outright.  Empty URLs are reported unsafe so
    callers substitute their own default target.
    """
    if not url:
        return False
    # Reject forms where urlparse and real browsers disagree about the host.
    if '\\' in url or url.startswith('///'):
        return False
    netloc = urlparse.urlparse(url)[1]
    # Host-relative URLs (no netloc) or same-host absolute URLs are fine.
    return not netloc or netloc == host


@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm,
          current_app=None, extra_context=None):
    """
    Displays the login form and handles the login action.

    The post-login redirect target comes from the request (untrusted), so it
    is validated with _is_safe_redirect_url() and replaced with
    settings.LOGIN_REDIRECT_URL when empty or unsafe.
    """
    redirect_to = request.REQUEST.get(redirect_field_name, '')

    if request.method == "POST":
        form = authentication_form(data=request.POST)
        if form.is_valid():
            # Security check -- don't honour an empty redirect target or a
            # redirect to a different host.
            if not _is_safe_redirect_url(redirect_to, request.get_host()):
                redirect_to = settings.LOGIN_REDIRECT_URL

            # Okay, security checks complete. Log the user in.
            auth_login(request, form.get_user())

            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()

            return HttpResponseRedirect(redirect_to)
    else:
        form = authentication_form(request)

    request.session.set_test_cookie()

    current_site = get_current_site(request)

    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))


def logout(request, next_page=None,
           template_name='registration/logged_out.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           current_app=None, extra_context=None):
    """
    Logs out the user and displays 'You are logged out' message.

    A redirect target supplied via the query string is only followed when it
    passes _is_safe_redirect_url(); otherwise the view falls back to
    ``next_page`` (or the logged-out template when ``next_page`` is None).
    """
    auth_logout(request)
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    # Security check -- only follow the requested redirect if it stays on
    # this host (empty values fail the check and fall through below).
    if _is_safe_redirect_url(redirect_to, request.get_host()):
        return HttpResponseRedirect(redirect_to)
    if next_page is None:
        current_site = get_current_site(request)
        context = {
            'site': current_site,
            'site_name': current_site.name,
            'title': _('Logged out')
        }
        context.update(extra_context or {})
        return render_to_response(template_name, context,
                                  context_instance=RequestContext(request,
                                                    current_app=current_app))
    else:
        # Redirect to this page until the session has been cleared.
        return HttpResponseRedirect(next_page or request.path)


def logout_then_login(request, login_url=None, current_app=None,
                      extra_context=None):
    """
    Logs out the user if he is logged in. Then redirects to the log-in page.
    """
    if not login_url:
        login_url = settings.LOGIN_URL
    return logout(request, login_url, current_app=current_app,
                  extra_context=extra_context)


def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    if not login_url:
        login_url = settings.LOGIN_URL

    login_url_parts = list(urlparse.urlparse(login_url))
    if redirect_field_name:
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

    return HttpResponseRedirect(urlparse.urlunparse(login_url_parts))


# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
#   prompts for a new password
# - password_reset_complete shows a success message for the above

@csrf_protect
def password_reset(request, is_admin_site=False,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   current_app=None,
                   extra_context=None):
    """Displays the password-reset form and sends the reset e-mail."""
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            opts = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
                'from_email': from_email,
                'email_template_name': email_template_name,
                'request': request,
            }
            if is_admin_site:
                opts = dict(opts, domain_override=request.META['HTTP_HOST'])
            form.save(**opts)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = password_reset_form()
    context = {
        'form': form,
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))


def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        current_app=None, extra_context=None):
    """Confirmation page shown after the reset e-mail has been sent."""
    context = {}
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))


# Doesn't need csrf_protect since no-one can guess the URL
@never_cache
def password_reset_confirm(request, uidb36=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           current_app=None, extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.
    """
    assert uidb36 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
    except (ValueError, User.DoesNotExist):
        user = None

    if user is not None and token_generator.check_token(user, token):
        validlink = True
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(None)
    else:
        validlink = False
        form = None
    context = {
        'form': form,
        'validlink': validlink,
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))


def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            current_app=None, extra_context=None):
    """Final confirmation page after a successful password reset."""
    context = {
        'login_url': settings.LOGIN_URL
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))


@csrf_protect
@login_required
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    current_app=None, extra_context=None):
    """Lets a logged-in user change their password."""
    if post_change_redirect is None:
        post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
    if request.method == "POST":
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(post_change_redirect)
    else:
        form = password_change_form(user=request.user)
    context = {
        'form': form,
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))


def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         current_app=None, extra_context=None):
    """Confirmation page after a successful password change."""
    context = {}
    context.update(extra_context or {})
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request,
                                                current_app=current_app))
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3768_2
crossvul-python_data_good_3660_1
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The security groups extension."""

import urllib
from xml.dom import minidom

import webob
from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova import quota
from nova import utils


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
QUOTAS = quota.QUOTAS
# Policy-check callable for every entry point in this extension.
authorize = extensions.extension_authorizer('compute', 'security_groups')


def make_rule(elem):
    # Populate an XML template element with the attributes/children of a
    # single security group rule.
    elem.set('id')
    elem.set('parent_group_id')

    proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    proto.text = 'ip_protocol'

    from_port = xmlutil.SubTemplateElement(elem, 'from_port')
    from_port.text = 'from_port'

    to_port = xmlutil.SubTemplateElement(elem, 'to_port')
    to_port.text = 'to_port'

    group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name = xmlutil.SubTemplateElement(group, 'name')
    name.text = 'name'
    tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
    tenant_id.text = 'tenant_id'

    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
                                          selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'


def make_sg(elem):
    # Populate an XML template element for a security group, including the
    # nested rule template via make_rule().
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    rules = xmlutil.SubTemplateElement(elem, 'rules')
    rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
    make_rule(rule)


sg_nsmap = {None: wsgi.XMLNS_V11}


class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    # XML serializer template for a single security group rule.
    def construct(self):
        root = xmlutil.TemplateElement('security_group_rule',
                                       selector='security_group_rule')
        make_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    # XML serializer template for a single security group.
    def construct(self):
        root = xmlutil.TemplateElement('security_group',
                                       selector='security_group')
        make_sg(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    # XML serializer template for a list of security groups.
    def construct(self):
        root = xmlutil.TemplateElement('security_groups')
        elem = xmlutil.SubTemplateElement(root, 'security_group',
                                          selector='security_groups')
        make_sg(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """
    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom, 'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node, "description")
            if desc_node:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}


class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group_rule = self._extract_security_group_rule(dom)
        return {'body': {'security_group_rule': security_group_rule}}

    def _extract_security_group_rule(self, node):
        """Marshal the security group rule attribute of a parsed request"""
        # Each field is optional in the XML; only keys that were present end
        # up in the resulting dict.
        sg_rule = {}
        sg_rule_node = self.find_first_child_named(node,
                                                   'security_group_rule')
        if sg_rule_node is not None:
            ip_protocol_node = self.find_first_child_named(sg_rule_node,
                                                           "ip_protocol")
            if ip_protocol_node is not None:
                sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)

            from_port_node = self.find_first_child_named(sg_rule_node,
                                                         "from_port")
            if from_port_node is not None:
                sg_rule['from_port'] = self.extract_text(from_port_node)

            to_port_node = self.find_first_child_named(sg_rule_node,
                                                       "to_port")
            if to_port_node is not None:
                sg_rule['to_port'] = self.extract_text(to_port_node)

            parent_group_id_node = self.find_first_child_named(
                sg_rule_node, "parent_group_id")
            if parent_group_id_node is not None:
                sg_rule['parent_group_id'] = self.extract_text(
                    parent_group_id_node)

            group_id_node = self.find_first_child_named(sg_rule_node,
                                                        "group_id")
            if group_id_node is not None:
                sg_rule['group_id'] = self.extract_text(group_id_node)

            cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
            if cidr_node is not None:
                sg_rule['cidr'] = self.extract_text(cidr_node)

        return sg_rule


class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        self.compute_api = compute.API()
        # Pluggable handler that propagates security group changes
        # (configured via FLAGS.security_group_handler).
        self.sgh = importutils.import_object(FLAGS.security_group_handler)

    def _format_security_group_rule(self, context, rule):
        # Convert a rule DB object into the dict shape returned by the API.
        # A rule targets either a source group or a CIDR, never both.
        sg_rule = {}
        sg_rule['id'] = rule.id
        sg_rule['parent_group_id'] = rule.parent_group_id
        sg_rule['ip_protocol'] = rule.protocol
        sg_rule['from_port'] = rule.from_port
        sg_rule['to_port'] = rule.to_port
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule.group_id:
            source_group = db.security_group_get(context, rule.group_id)
            sg_rule['group'] = {'name': source_group.name,
                                'tenant_id': source_group.project_id}
        else:
            sg_rule['ip_range'] = {'cidr': rule.cidr}
        return sg_rule

    def _format_security_group(self, context, group):
        # Convert a security group DB object (rules included) into the dict
        # shape returned by the API.
        security_group = {}
        security_group['id'] = group.id
        security_group['description'] = group.description
        security_group['name'] = group.name
        security_group['tenant_id'] = group.project_id
        security_group['rules'] = []
        for rule in group.rules:
            security_group['rules'] += [self._format_security_group_rule(
                context, rule)]
        return security_group


class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""

    def _get_security_group(self, context, id):
        # Validate the untrusted id as an integer *before* the DB lookup;
        # non-integer ids become 400, unknown ids become 404.
        try:
            id = int(id)
            security_group = db.security_group_get(context, id)
        except ValueError:
            msg = _("Security group id should be integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        return security_group

    @wsgi.serializers(xml=SecurityGroupTemplate)
    def show(self, req, id):
        """Return data about the given security group."""
        context = req.environ['nova.context']
        authorize(context)
        security_group = self._get_security_group(context, id)
        return {'security_group': self._format_security_group(context,
                                                              security_group)}

    def delete(self, req, id):
        """Delete a security group."""
        context = req.environ['nova.context']
        authorize(context)
        security_group = self._get_security_group(context, id)
        if db.security_group_in_use(context, security_group.id):
            msg = _("Security group is still in use")
            raise exc.HTTPBadRequest(explanation=msg)

        # Get reservations -- a quota-accounting failure here is logged but
        # deliberately does not abort the delete.
        try:
            reservations = QUOTAS.reserve(context, security_groups=-1)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deallocating "
                            "security group"))

        LOG.audit(_("Delete security group %s"), id, context=context)
        db.security_group_destroy(context, security_group.id)
        self.sgh.trigger_security_group_destroy_refresh(
            context, security_group.id)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations)

        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req):
        """Returns a list of security groups"""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)
        groups = db.security_group_get_by_project(context,
                                                  context.project_id)
        limited_list = common.limited(groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]

        # Stable ordering by (tenant, name) for deterministic output.
        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}

    @wsgi.serializers(xml=SecurityGroupTemplate)
    @wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
    def create(self, req, body):
        """Creates a new security group."""
        context = req.environ['nova.context']
        authorize(context)
        if not body:
            raise exc.HTTPUnprocessableEntity()

        security_group = body.get('security_group', None)

        if security_group is None:
            raise exc.HTTPUnprocessableEntity()

        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)

        # Reject non-string, empty, or over-long name/description before
        # touching quotas or the DB.
        self._validate_security_group_property(group_name, "name")
        self._validate_security_group_property(group_description,
                                              "description")
        group_name = group_name.strip()
        group_description = group_description.strip()

        try:
            reservations = QUOTAS.reserve(context, security_groups=1)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security groups.")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            LOG.audit(_("Create Security Group %s"), group_name,
                      context=context)
            self.compute_api.ensure_default_security_group(context)
            if db.security_group_exists(context, context.project_id,
                                        group_name):
                msg = _('Security group %s already exists') % group_name
                raise exc.HTTPBadRequest(explanation=msg)

            group = {'user_id': context.user_id,
                     'project_id': context.project_id,
                     'name': group_name,
                     'description': group_description}
            group_ref = db.security_group_create(context, group)
            self.sgh.trigger_security_group_create_refresh(context, group)
            # Commit the reservation
            QUOTAS.commit(context, reservations)
        except Exception:
            # Any failure after the reserve must roll the quota back before
            # the exception is re-raised.
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)

        return {'security_group': self._format_security_group(context,
                                                              group_ref)}

    def _validate_security_group_property(self, value, typ):
        """
        typ will be either 'name' or 'description',
        depending on the caller
        """
        try:
            val = value.strip()
        except AttributeError:
            # Catches None and any other non-string value.
            msg = _("Security group %s is not a string or unicode") % typ
            raise exc.HTTPBadRequest(explanation=msg)
        if not val:
            msg = _("Security group %s cannot be empty.") % typ
            raise exc.HTTPBadRequest(explanation=msg)
        if len(val) > 255:
            msg = _("Security group %s should not be greater "
                    "than 255 characters.") % typ
            raise exc.HTTPBadRequest(explanation=msg)


class SecurityGroupRulesController(SecurityGroupControllerBase):
    # CRUD controller for individual security group rules.

    @wsgi.serializers(xml=SecurityGroupRuleTemplate)
    @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
    def create(self, req, body):
        # Create a rule inside an existing (parent) security group.
        context = req.environ['nova.context']
        authorize(context)

        if not body:
            raise exc.HTTPUnprocessableEntity()

        if not 'security_group_rule' in body:
            raise exc.HTTPUnprocessableEntity()

        self.compute_api.ensure_default_security_group(context)

        sg_rule = body['security_group_rule']
        parent_group_id = sg_rule.get('parent_group_id', None)
        # parent_group_id is untrusted input -- validate as integer first.
        try:
            parent_group_id = int(parent_group_id)
            security_group = db.security_group_get(context, parent_group_id)
        except ValueError:
            msg = _("Parent group id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            msg = _("Security group (%s) not found") % parent_group_id
            raise exc.HTTPNotFound(explanation=msg)

        msg = _("Authorize security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        try:
            values = self._rule_args_to_dict(context,
                              to_port=sg_rule.get('to_port'),
                              from_port=sg_rule.get('from_port'),
                              parent_group_id=sg_rule.get('parent_group_id'),
                              ip_protocol=sg_rule.get('ip_protocol'),
                              cidr=sg_rule.get('cidr'),
                              group_id=sg_rule.get('group_id'))
        except Exception as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        if values is None:
            msg = _("Not enough parameters to build a "
                    "valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)

        values['parent_group_id'] = security_group.id

        if self._security_group_rule_exists(security_group, values):
            msg = _('This rule already exists in group %s') % parent_group_id
            raise exc.HTTPBadRequest(explanation=msg)

        count = QUOTAS.count(context, 'security_group_rules',
                             parent_group_id)
        try:
            QUOTAS.limit_check(context, security_group_rules=count + 1)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security group rules.")
            raise exc.HTTPBadRequest(explanation=msg)

        security_group_rule = db.security_group_rule_create(context, values)
        self.sgh.trigger_security_group_rule_create_refresh(
            context, [security_group_rule['id']])

        self.compute_api.trigger_security_group_rules_refresh(context,
                                    security_group_id=security_group['id'])

        return {"security_group_rule": self._format_security_group_rule(
                                                        context,
                                                        security_group_rule)}

    def _security_group_rule_exists(self, security_group, values):
        """Indicates whether the specified rule values are already
           defined in the given security group.
        """
        # A rule is a duplicate only if *all* identifying keys match.
        for rule in security_group.rules:
            is_duplicate = True
            keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    is_duplicate = False
                    break
            if is_duplicate:
                return True
        return False

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           parent_group_id=None, ip_protocol=None,
                           cidr=None, group_id=None):
        # Validate raw request fields and build the values dict for
        # db.security_group_rule_create().  Returns None when the
        # combination of arguments cannot form a valid rule.
        values = {}

        if group_id is not None:
            try:
                parent_group_id = int(parent_group_id)
                group_id = int(group_id)
            except ValueError:
                msg = _("Parent or group id is not integer")
                raise exception.InvalidInput(reason=msg)

            values['group_id'] = group_id
            #check if groupId exists
            db.security_group_get(context, group_id)
        elif cidr:
            # If this fails, it throws an exception. This is what we want.
            # The CIDR arrives URL-encoded and is decoded before validation.
            try:
                cidr = urllib.unquote(cidr).decode()
            except Exception:
                raise exception.InvalidCidr(cidr=cidr)

            if not utils.is_valid_cidr(cidr):
                # Raise exception for non-valid address
                raise exception.InvalidCidr(cidr=cidr)

            values['cidr'] = cidr
        else:
            values['cidr'] = '0.0.0.0/0'

        if group_id:
            # Open everything if an explicit port range or type/code are not
            # specified, but only if a source group was specified.
            ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
            if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
                from_port = -1
                to_port = -1
            elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
                  and to_port is None):
                from_port = 1
                to_port = 65535

        if ip_protocol and from_port is not None and to_port is not None:

            ip_protocol = str(ip_protocol)
            try:
                # Verify integer type of ports; error text differs for ICMP
                # because its "ports" are really type/code values.
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                         " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                          "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                from_port > to_port):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                           " be greater than the later")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                                           " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                 to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol.lower()
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def delete(self, req, id):
        # Revoke (delete) a single rule; id is validated as integer first.
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)
        try:
            id = int(id)
            rule = db.security_group_rule_get(context, id)
        except ValueError:
            msg = _("Rule id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            raise exc.HTTPNotFound(explanation=msg)

        group_id = rule.parent_group_id
        self.compute_api.ensure_default_security_group(context)
        security_group = db.security_group_get(context, group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        db.security_group_rule_destroy(context, rule['id'])
        self.sgh.trigger_security_group_rule_destroy_refresh(
            context, [rule['id']])
        self.compute_api.trigger_security_group_rules_refresh(context,
                                    security_group_id=security_group['id'])

        return webob.Response(status_int=202)


class ServerSecurityGroupController(SecurityGroupControllerBase):
    # Read-only view of the groups attached to a particular server.

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)

        try:
            instance = self.compute_api.get(context, server_id)
            groups = db.security_group_get_by_instance(context,
                                                       instance['id'])
        except exception.ApiError, e:
            raise webob.exc.HTTPBadRequest(explanation=e.message)
        except exception.NotAuthorized, e:
            raise webob.exc.HTTPUnauthorized()

        result = [self._format_security_group(context, group)
                  for group in groups]

        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}


class SecurityGroupActionController(wsgi.Controller):
    # Server-action controller: addSecurityGroup / removeSecurityGroup.

    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.sgh = importutils.import_object(FLAGS.security_group_handler)

    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        try:
            body = body['addSecurityGroup']
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.add_security_group(context, instance,
                                                group_name)
            self.sgh.trigger_instance_add_security_group_refresh(
                context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        return webob.Response(status_int=202)

    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        try:
            body = body['removeSecurityGroup']
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.remove_security_group(context, instance,
                                                   group_name)
            self.sgh.trigger_instance_remove_security_group_refresh(
                context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        return webob.Response(status_int=202)


class Security_groups(extensions.ExtensionDescriptor):
    """Security group support"""

    name = "SecurityGroups"
    alias = "security_groups"
    namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
    updated = "2011-07-21T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the add/remove actions to the existing 'servers' resource.
        controller = SecurityGroupActionController()
        extension = extensions.ControllerExtension(self, 'servers',
                                                   controller)
        return [extension]

    def get_resources(self):
        # Register the three REST resources provided by this extension.
        resources = []

        res = extensions.ResourceExtension('os-security-groups',
                                controller=SecurityGroupController())
        resources.append(res)

        res = extensions.ResourceExtension('os-security-group-rules',
                                controller=SecurityGroupRulesController())
        resources.append(res)

        res = extensions.ResourceExtension(
            'os-security-groups',
            controller=ServerSecurityGroupController(),
            parent=dict(member_name='server', collection_name='servers'))
        resources.append(res)

        return resources
./CrossVul/dataset_final_sorted/CWE-20/py/good_3660_1
crossvul-python_data_good_2156_0
#
# The Python Imaging Library.
# $Id$
#
# Mac OS X icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl  Turned into a PIL plugin; removed 2.3 dependencies.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
#
# See the README file for information on usage and redistribution.
#

from PIL import Image, ImageFile, PngImagePlugin, _binary
import struct, io

# JPEG 2000 subimages are only decodable when PIL was built with OpenJPEG.
enable_jpeg2k = hasattr(Image.core, 'jp2klib_version')
if enable_jpeg2k:
    from PIL import Jpeg2KImagePlugin

i8 = _binary.i8

HEADERSIZE = 8


def nextheader(fobj):
    # Every icns block starts with an 8-byte header: a 4-byte type code
    # followed by a 4-byte big-endian total length (header included).
    return struct.unpack('>4sI', fobj.read(HEADERSIZE))


def read_32t(fobj, start_length, size):
    # The 128x128 icon seems to have an extra header for some reason.
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(4)
    if sig != b'\x00\x00\x00\x00':
        raise SyntaxError('Unknown signature, expecting 0x00000000')
    # Skip the 4 extra bytes and decode as a normal 32-bit resource.
    return read_32(fobj, (start + 4, length - 4), size)


def read_32(fobj, start_length, size):
    """
    Read a 32bit RGB icon resource.  Seems to be either uncompressed or
    an RLE packbits-like scheme.
    """
    (start, length) = start_length
    fobj.seek(start)
    # size is (width, height, scale); pixel dimensions are width*scale etc.
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    if length == sizesq * 3:
        # uncompressed ("RGBRGBGB")
        indata = fobj.read(length)
        im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
    else:
        # decode image
        im = Image.new("RGB", pixel_size, None)
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while bytesleft > 0:
                byte = fobj.read(1)
                if not byte:
                    # EOF before the band is complete -- fall through to
                    # the bytesleft check below, which raises.
                    break
                byte = i8(byte)
                if byte & 0x80:
                    # Run: high bit set means "repeat next byte N times".
                    blocksize = byte - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # Literal: copy the next blocksize bytes verbatim.
                    blocksize = byte + 1
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if bytesleft <= 0:
                    break
            if bytesleft != 0:
                # Underflow below zero or premature EOF is a corrupt stream.
                raise SyntaxError(
                    "Error reading channel [%r left]" % bytesleft
                )
            band = Image.frombuffer(
                "L", pixel_size, b"".join(data), "raw", "L", 0, 1
            )
            im.im.putband(band.im, band_ix)
    return {"RGB": im}


def read_mk(fobj, start_length, size):
    # Alpha masks seem to be uncompressed
    (start, length) = start_length
    fobj.seek(start)
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    band = Image.frombuffer(
        "L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1
    )
    return {"A": band}


def read_png_or_jpeg2000(fobj, start_length, size):
    # Modern icns subimages embed a whole PNG or JPEG 2000 stream;
    # dispatch on the magic bytes.
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(12)
    if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a':
        fobj.seek(start)
        im = PngImagePlugin.PngImageFile(fobj)
        return {"RGBA": im}
    elif sig[:4] == b'\xff\x4f\xff\x51' \
            or sig[:4] == b'\x0d\x0a\x87\x0a' \
            or sig == b'\x00\x00\x00\x0cjP  \x0d\x0a\x87\x0a':
        if not enable_jpeg2k:
            raise ValueError('Unsupported icon subimage format (rebuild PIL with JPEG 2000 support to fix this)')
        # j2k, jpc or j2c
        fobj.seek(start)
        jp2kstream = fobj.read(length)
        f = io.BytesIO(jp2kstream)
        im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
        if im.mode != 'RGBA':
            im = im.convert('RGBA')
        return {"RGBA": im}
    else:
        raise ValueError('Unsupported icon subimage format')


class IcnsFile:
    # Low-level parser for the icns container.  SIZES maps
    # (width, height, scale) to the block type codes that can supply that
    # size, in preference order.

    SIZES = {
        (512, 512, 2): [
            (b'ic10', read_png_or_jpeg2000),
        ],
        (512, 512, 1): [
            (b'ic09', read_png_or_jpeg2000),
        ],
        (256, 256, 2): [
            (b'ic14', read_png_or_jpeg2000),
        ],
        (256, 256, 1): [
            (b'ic08', read_png_or_jpeg2000),
        ],
        (128, 128, 2): [
            (b'ic13', read_png_or_jpeg2000),
        ],
        (128, 128, 1): [
            (b'ic07', read_png_or_jpeg2000),
            (b'it32', read_32t),
            (b't8mk', read_mk),
        ],
        (64, 64, 1): [
            (b'icp6', read_png_or_jpeg2000),
        ],
        (32, 32, 2): [
            (b'ic12', read_png_or_jpeg2000),
        ],
        (48, 48, 1): [
            (b'ih32', read_32),
            (b'h8mk', read_mk),
        ],
        (32, 32, 1): [
            (b'icp5', read_png_or_jpeg2000),
            (b'il32', read_32),
            (b'l8mk', read_mk),
        ],
        (16, 16, 2): [
            (b'ic11', read_png_or_jpeg2000),
        ],
        (16, 16, 1): [
            (b'icp4', read_png_or_jpeg2000),
            (b'is32', read_32),
            (b's8mk', read_mk),
        ],
    }

    def __init__(self, fobj):
        """
        fobj is a file-like object as an icns resource
        """
        # signature : (start, length)
        self.dct = dct = {}
        self.fobj = fobj
        sig, filesize = nextheader(fobj)
        if sig != b'icns':
            raise SyntaxError('not an icns file')
        i = HEADERSIZE
        while i < filesize:
            sig, blocksize = nextheader(fobj)
            # A non-positive declared block size would stall this loop
            # forever on a malformed file -- reject it up front.
            if blocksize <= 0:
                raise SyntaxError('invalid block header')
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            dct[sig] = (i, blocksize)
            fobj.seek(blocksize, 1)
            i += blocksize

    def itersizes(self):
        # Return every (w, h, scale) for which at least one block exists.
        sizes = []
        for size, fmts in self.SIZES.items():
            for (fmt, reader) in fmts:
                if fmt in self.dct:
                    sizes.append(size)
                    break
        return sizes

    def bestsize(self):
        sizes = self.itersizes()
        if not sizes:
            raise SyntaxError("No 32bit icon resources found")
        return max(sizes)

    def dataforsize(self, size):
        """
        Get an icon resource as {channel: array}.  Note that
        the arrays are bottom-up like windows bitmaps and will likely
        need to be flipped or transposed in some way.
        """
        dct = {}
        for code, reader in self.SIZES[size]:
            desc = self.dct.get(code)
            if desc is not None:
                dct.update(reader(self.fobj, desc, size))
        return dct

    def getimage(self, size=None):
        # Assemble a single RGBA image for the requested (or best) size.
        if size is None:
            size = self.bestsize()
        if len(size) == 2:
            size = (size[0], size[1], 1)
        channels = self.dataforsize(size)

        # PNG/JPEG2000 subimages already carry their own alpha.
        im = channels.get('RGBA', None)
        if im:
            return im

        im = channels.get("RGB").copy()
        try:
            im.putalpha(channels["A"])
        except KeyError:
            # No mask block for this size; leave the image fully opaque.
            pass
        return im


##
# Image plugin for Mac OS icons.

class IcnsImageFile(ImageFile.ImageFile):
    """
    PIL read-only image support for Mac OS .icns files.
    Chooses the best resolution, but will possibly load
    a different size image if you mutate the size attribute
    before calling 'load'.

    The info dictionary has a key 'sizes' that is a list
    of sizes that the icns file has.
    """

    format = "ICNS"
    format_description = "Mac OS icns resource"

    def _open(self):
        self.icns = IcnsFile(self.fp)
        self.mode = 'RGBA'
        self.best_size = self.icns.bestsize()
        self.size = (self.best_size[0] * self.best_size[2],
                     self.best_size[1] * self.best_size[2])
        self.info['sizes'] = self.icns.itersizes()
        # Just use this to see if it's loaded or not yet.
        self.tile = ('',)

    def load(self):
        # NOTE(review): method continues beyond this chunk -- the visible
        # part only handles a caller-supplied 3-tuple size and the
        # already-loaded fast path.
        if len(self.size) == 3:
            self.best_size = self.size
            self.size = (self.best_size[0] * self.best_size[2],
                         self.best_size[1] * self.best_size[2])

        Image.Image.load(self)
        if not self.tile:
            return
        self.load_prepare()
        # This is likely NOT the best way to do it, but whatever.
im = self.icns.getimage(self.best_size) # If this is a PNG or JPEG 2000, it won't be loaded yet im.load() self.im = im.im self.mode = im.mode self.size = im.size self.fp = None self.icns = None self.tile = () self.load_end() Image.register_open("ICNS", IcnsImageFile, lambda x: x[:4] == b'icns') Image.register_extension("ICNS", '.icns') if __name__ == '__main__': import os, sys imf = IcnsImageFile(open(sys.argv[1], 'rb')) for size in imf.info['sizes']: imf.size = size imf.load() im = imf.im im.save('out-%s-%s-%s.png' % size) im = Image.open(open(sys.argv[1], "rb")) im.save("out.png") if sys.platform == 'windows': os.startfile("out.png")
./CrossVul/dataset_final_sorted/CWE-20/py/good_2156_0
crossvul-python_data_bad_1376_3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from thrift.Thrift import *


class TProtocolException(TException):
    """Custom Protocol Exception class"""

    # Error codes carried in self.type
    UNKNOWN = 0
    INVALID_DATA = 1
    NEGATIVE_SIZE = 2
    SIZE_LIMIT = 3
    BAD_VERSION = 4
    INVALID_PROTOCOL = 5
    MISSING_REQUIRED_FIELD = 6

    def __init__(self, type=UNKNOWN, message=None):
        """type: one of the class-level error codes; message: human text."""
        TException.__init__(self, message)
        self.type = type


class TProtocolBase:
    """Base class for Thrift protocol driver.

    Concrete protocols (binary, compact, JSON, ...) override the read*/write*
    hooks below.  The defaults are no-ops so a subclass only needs to
    implement the pieces its wire format actually uses.
    """

    # Upper bound on how deeply skip() will recurse into nested containers.
    # Without a bound, a small but maliciously deeply nested payload could
    # exhaust the interpreter stack (improper input validation, CWE-20).
    MAX_SKIP_DEPTH = 2 ** 15

    def __init__(self, trans):
        # trans: the underlying TTransport to read from / write to.
        self.trans = trans

    def writeMessageBegin(self, name, ttype, seqid):
        pass

    def writeMessageEnd(self):
        pass

    def writeStructBegin(self, name):
        pass

    def writeStructEnd(self):
        pass

    def writeUnionBegin(self, name):
        # Unions serialize as structs by default.
        self.writeStructBegin(name)

    def writeUnionEnd(self):
        self.writeStructEnd()

    def writeFieldBegin(self, name, type, id):
        pass

    def writeFieldEnd(self):
        pass

    def writeFieldStop(self):
        pass

    def writeMapBegin(self, ktype, vtype, size):
        pass

    def writeMapEnd(self):
        pass

    def writeListBegin(self, etype, size):
        pass

    def writeListEnd(self):
        pass

    def writeSetBegin(self, etype, size):
        pass

    def writeSetEnd(self):
        pass

    def writeBool(self, bool_val):
        pass

    def writeByte(self, byte):
        pass

    def writeI16(self, i16):
        pass

    def writeI32(self, i32):
        pass

    def writeI64(self, i64):
        pass

    def writeDouble(self, dub):
        pass

    def writeFloat(self, flt):
        pass

    def writeString(self, str):
        pass

    def readMessageBegin(self):
        pass

    def readMessageEnd(self):
        pass

    def readStructBegin(self):
        pass

    def readStructEnd(self):
        pass

    def readFieldBegin(self):
        pass

    def readFieldEnd(self):
        pass

    def readMapBegin(self):
        pass

    def readMapEnd(self):
        pass

    def readListBegin(self):
        pass

    def readListEnd(self):
        pass

    def readSetBegin(self):
        pass

    def readSetEnd(self):
        pass

    def readBool(self):
        pass

    def readByte(self):
        pass

    def readI16(self):
        pass

    def readI32(self):
        pass

    def readI64(self):
        pass

    def readDouble(self):
        pass

    def readFloat(self):
        pass

    def readString(self):
        pass

    def skip(self, type, maxDepth=None):
        """Read and discard the next value of the given TType.

        maxDepth bounds recursion into nested STRUCT/MAP/SET/LIST values.
        It defaults to MAX_SKIP_DEPTH; when exhausted, a TProtocolException
        is raised so hostile input cannot blow the interpreter stack.
        The parameter is optional, so existing single-argument callers are
        unaffected.
        """
        if maxDepth is None:
            maxDepth = self.MAX_SKIP_DEPTH
        if maxDepth <= 0:
            raise TProtocolException(
                TProtocolException.INVALID_DATA,
                'Exceeded maximum skip depth')
        if type == TType.STOP:
            return
        elif type == TType.BOOL:
            self.readBool()
        elif type == TType.BYTE:
            self.readByte()
        elif type == TType.I16:
            self.readI16()
        elif type == TType.I32:
            self.readI32()
        elif type == TType.I64:
            self.readI64()
        elif type == TType.DOUBLE:
            self.readDouble()
        elif type == TType.FLOAT:
            self.readFloat()
        elif type == TType.STRING:
            self.readString()
        elif type == TType.STRUCT:
            name = self.readStructBegin()
            while True:
                (name, type, id) = self.readFieldBegin()
                if type == TType.STOP:
                    break
                self.skip(type, maxDepth - 1)
                self.readFieldEnd()
            self.readStructEnd()
        elif type == TType.MAP:
            (ktype, vtype, size) = self.readMapBegin()
            for _ in range(size):
                self.skip(ktype, maxDepth - 1)
                self.skip(vtype, maxDepth - 1)
            self.readMapEnd()
        elif type == TType.SET:
            (etype, size) = self.readSetBegin()
            for _ in range(size):
                self.skip(etype, maxDepth - 1)
            self.readSetEnd()
        elif type == TType.LIST:
            (etype, size) = self.readListBegin()
            for _ in range(size):
                self.skip(etype, maxDepth - 1)
            self.readListEnd()

    def readIntegral(self, type):
        """Dispatch to the reader for an integral TType; raise otherwise."""
        if type == TType.BOOL:
            return self.readBool()
        elif type == TType.BYTE:
            return self.readByte()
        elif type == TType.I16:
            return self.readI16()
        elif type == TType.I32:
            return self.readI32()
        elif type == TType.I64:
            return self.readI64()
        else:
            raise Exception("Unknown integral type: %s" % str(type))

    def readFloatingPoint(self, type):
        """Dispatch to the reader for a floating-point TType; raise otherwise."""
        if type == TType.FLOAT:
            return self.readFloat()
        elif type == TType.DOUBLE:
            return self.readDouble()
        else:
            raise Exception("Unknown floating point type: %s" % str(type))


class TProtocolFactory:
    """Factory interface: subclasses return a protocol bound to a transport."""

    def getProtocol(self, trans):
        pass
./CrossVul/dataset_final_sorted/CWE-20/py/bad_1376_3
crossvul-python_data_good_872_3
""" Helpers for URI and method injection tests. @see: U{CVE-2019-12387} """ import string UNPRINTABLE_ASCII = ( frozenset(range(0, 128)) - frozenset(bytearray(string.printable, 'ascii')) ) NONASCII = frozenset(range(128, 256)) class MethodInjectionTestsMixin(object): """ A mixin that runs HTTP method injection tests. Define L{MethodInjectionTestsMixin.attemptRequestWithMaliciousMethod} in a L{twisted.trial.unittest.SynchronousTestCase} subclass to test how HTTP client code behaves when presented with malicious HTTP methods. @see: U{CVE-2019-12387} """ def attemptRequestWithMaliciousMethod(self, method): """ Attempt to send a request with the given method. This should synchronously raise a L{ValueError} if either is invalid. @param method: the method (e.g. C{GET\x00}) @param uri: the URI @type method: """ raise NotImplementedError() def test_methodWithCLRFRejected(self): """ Issuing a request with a method that contains a carriage return and line feed fails with a L{ValueError}. """ with self.assertRaises(ValueError) as cm: method = b"GET\r\nX-Injected-Header: value" self.attemptRequestWithMaliciousMethod(method) self.assertRegex(str(cm.exception), "^Invalid method") def test_methodWithUnprintableASCIIRejected(self): """ Issuing a request with a method that contains unprintable ASCII characters fails with a L{ValueError}. """ for c in UNPRINTABLE_ASCII: method = b"GET%s" % (bytearray([c]),) with self.assertRaises(ValueError) as cm: self.attemptRequestWithMaliciousMethod(method) self.assertRegex(str(cm.exception), "^Invalid method") def test_methodWithNonASCIIRejected(self): """ Issuing a request with a method that contains non-ASCII characters fails with a L{ValueError}. """ for c in NONASCII: method = b"GET%s" % (bytearray([c]),) with self.assertRaises(ValueError) as cm: self.attemptRequestWithMaliciousMethod(method) self.assertRegex(str(cm.exception), "^Invalid method") class URIInjectionTestsMixin(object): """ A mixin that runs HTTP URI injection tests. 
Define L{MethodInjectionTestsMixin.attemptRequestWithMaliciousURI} in a L{twisted.trial.unittest.SynchronousTestCase} subclass to test how HTTP client code behaves when presented with malicious HTTP URIs. """ def attemptRequestWithMaliciousURI(self, method): """ Attempt to send a request with the given URI. This should synchronously raise a L{ValueError} if either is invalid. @param uri: the URI. @type method: """ raise NotImplementedError() def test_hostWithCRLFRejected(self): """ Issuing a request with a URI whose host contains a carriage return and line feed fails with a L{ValueError}. """ with self.assertRaises(ValueError) as cm: uri = b"http://twisted\r\n.invalid/path" self.attemptRequestWithMaliciousURI(uri) self.assertRegex(str(cm.exception), "^Invalid URI") def test_hostWithWithUnprintableASCIIRejected(self): """ Issuing a request with a URI whose host contains unprintable ASCII characters fails with a L{ValueError}. """ for c in UNPRINTABLE_ASCII: uri = b"http://twisted%s.invalid/OK" % (bytearray([c]),) with self.assertRaises(ValueError) as cm: self.attemptRequestWithMaliciousURI(uri) self.assertRegex(str(cm.exception), "^Invalid URI") def test_hostWithNonASCIIRejected(self): """ Issuing a request with a URI whose host contains non-ASCII characters fails with a L{ValueError}. """ for c in NONASCII: uri = b"http://twisted%s.invalid/OK" % (bytearray([c]),) with self.assertRaises(ValueError) as cm: self.attemptRequestWithMaliciousURI(uri) self.assertRegex(str(cm.exception), "^Invalid URI") def test_pathWithCRLFRejected(self): """ Issuing a request with a URI whose path contains a carriage return and line feed fails with a L{ValueError}. 
""" with self.assertRaises(ValueError) as cm: uri = b"http://twisted.invalid/\r\npath" self.attemptRequestWithMaliciousURI(uri) self.assertRegex(str(cm.exception), "^Invalid URI") def test_pathWithWithUnprintableASCIIRejected(self): """ Issuing a request with a URI whose path contains unprintable ASCII characters fails with a L{ValueError}. """ for c in UNPRINTABLE_ASCII: uri = b"http://twisted.invalid/OK%s" % (bytearray([c]),) with self.assertRaises(ValueError) as cm: self.attemptRequestWithMaliciousURI(uri) self.assertRegex(str(cm.exception), "^Invalid URI") def test_pathWithNonASCIIRejected(self): """ Issuing a request with a URI whose path contains non-ASCII characters fails with a L{ValueError}. """ for c in NONASCII: uri = b"http://twisted.invalid/OK%s" % (bytearray([c]),) with self.assertRaises(ValueError) as cm: self.attemptRequestWithMaliciousURI(uri) self.assertRegex(str(cm.exception), "^Invalid URI")
./CrossVul/dataset_final_sorted/CWE-20/py/good_872_3
crossvul-python_data_bad_2816_2
# -*- coding: utf-8 -*- ''' Zeromq transport classes ''' # Import Python Libs from __future__ import absolute_import import os import sys import copy import errno import signal import hashlib import logging import weakref from random import randint # Import Salt Libs import salt.auth import salt.crypt import salt.utils import salt.utils.verify import salt.utils.event import salt.payload import salt.transport.client import salt.transport.server import salt.transport.mixins.auth from salt.exceptions import SaltReqTimeoutError import zmq import zmq.error import zmq.eventloop.ioloop # support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'): zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop import zmq.eventloop.zmqstream try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False # Import Tornado Libs import tornado import tornado.gen import tornado.concurrent # Import third party libs import salt.ext.six as six try: from Cryptodome.Cipher import PKCS1_OAEP except ImportError: from Crypto.Cipher import PKCS1_OAEP log = logging.getLogger(__name__) class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel): ''' Encapsulate sending routines to ZeroMQ. 
ZMQ Channels default to 'crypt=aes' ''' # This class is only a singleton per minion/master pair # mapping of io_loop -> {key -> channel} instance_map = weakref.WeakKeyDictionary() def __new__(cls, opts, **kwargs): ''' Only create one instance of channel per __key() ''' # do we have any mapping for this io_loop io_loop = kwargs.get('io_loop') if io_loop is None: zmq.eventloop.ioloop.install() io_loop = tornado.ioloop.IOLoop.current() if io_loop not in cls.instance_map: cls.instance_map[io_loop] = weakref.WeakValueDictionary() loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) obj = loop_instance_map.get(key) if obj is None: log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller obj = object.__new__(cls) obj.__singleton_init__(opts, **kwargs) loop_instance_map[key] = obj log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid())) else: log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key)) return obj def __deepcopy__(self, memo): cls = self.__class__ result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args memo[id(self)] = result for key in self.__dict__: if key in ('_io_loop',): continue # The _io_loop has a thread Lock which will fail to be deep # copied. Skip it because it will just be recreated on the # new copy. if key == 'message_client': # Recreate the message client because it will fail to be deep # copied. The reason is the same as the io_loop skip above. 
setattr(result, key, AsyncReqMessageClientPool(result.opts, args=(result.opts, self.master_uri,), kwargs={'io_loop': self._io_loop})) continue setattr(result, key, copy.deepcopy(self.__dict__[key], memo)) return result @classmethod def __key(cls, opts, **kwargs): return (opts['pki_dir'], # where the keys are stored opts['id'], # minion ID kwargs.get('master_uri', opts.get('master_uri')), # master ID kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt ) # has to remain empty for singletons, since __init__ will *always* be called def __init__(self, opts, **kwargs): pass # an init for the singleton instance to call def __singleton_init__(self, opts, **kwargs): self.opts = dict(opts) self.ttype = 'zeromq' # crypt defaults to 'aes' self.crypt = kwargs.get('crypt', 'aes') if 'master_uri' in kwargs: self.opts['master_uri'] = kwargs['master_uri'] self._io_loop = kwargs.get('io_loop') if self._io_loop is None: zmq.eventloop.ioloop.install() self._io_loop = tornado.ioloop.IOLoop.current() if self.crypt != 'clear': # we don't need to worry about auth as a kwarg, since its a singleton self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop) self.message_client = AsyncReqMessageClientPool(self.opts, args=(self.opts, self.master_uri,), kwargs={'io_loop': self._io_loop}) def __del__(self): ''' Since the message_client creates sockets and assigns them to the IOLoop we have to specifically destroy them, since we aren't the only ones with references to the FDs ''' if hasattr(self, 'message_client'): self.message_client.destroy() else: log.debug('No message_client attr for AsyncZeroMQReqChannel found. 
Not destroying sockets.') @property def master_uri(self): return self.opts['master_uri'] def _package_load(self, load): return { 'enc': self.crypt, 'load': load, } @tornado.gen.coroutine def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60): if not self.auth.authenticated: # Return controle back to the caller, continue when authentication succeeds yield self.auth.authenticate() # Return control to the caller. When send() completes, resume by populating ret with the Future.result ret = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) key = self.auth.get_keys() cipher = PKCS1_OAEP.new(key) if 'key' not in ret: # Reauth in the case our key is deleted on the master side. yield self.auth.authenticate() ret = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) aes = cipher.decrypt(ret['key']) pcrypt = salt.crypt.Crypticle(self.opts, aes) data = pcrypt.loads(ret[dictkey]) if six.PY3: data = salt.transport.frame.decode_embedded_strs(data) raise tornado.gen.Return(data) @tornado.gen.coroutine def _crypted_transfer(self, load, tries=3, timeout=60, raw=False): ''' Send a load across the wire, with encryption In case of authentication errors, try to renegotiate authentication and retry the method. Indeed, we can fail too early in case of a master restart during a minion state execution call :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing ''' @tornado.gen.coroutine def _do_transfer(): # Yield control to the caller. 
When send() completes, resume by populating data with the Future.result data = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) # we may not have always data # as for example for saltcall ret submission, this is a blind # communication, we do not subscribe to return events, we just # upload the results to the master if data: data = self.auth.crypticle.loads(data, raw) if six.PY3 and not raw: data = salt.transport.frame.decode_embedded_strs(data) raise tornado.gen.Return(data) if not self.auth.authenticated: # Return control back to the caller, resume when authentication succeeds yield self.auth.authenticate() try: # We did not get data back the first time. Retry. ret = yield _do_transfer() except salt.crypt.AuthenticationError: # If auth error, return control back to the caller, continue when authentication succeeds yield self.auth.authenticate() ret = yield _do_transfer() raise tornado.gen.Return(ret) @tornado.gen.coroutine def _uncrypted_transfer(self, load, tries=3, timeout=60): ''' Send a load across the wire in cleartext :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing ''' ret = yield self.message_client.send( self._package_load(load), timeout=timeout, tries=tries, ) raise tornado.gen.Return(ret) @tornado.gen.coroutine def send(self, load, tries=3, timeout=60, raw=False): ''' Send a request, return a future which will complete when we send the message ''' if self.crypt == 'clear': ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout) else: ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw) raise tornado.gen.Return(ret) class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel): ''' A transport channel backed by ZeroMQ for a Salt Publisher to use to publish 
commands to connected minions ''' def __init__(self, opts, **kwargs): self.opts = opts self.ttype = 'zeromq' self.io_loop = kwargs.get('io_loop') if self.io_loop is None: zmq.eventloop.ioloop.install() self.io_loop = tornado.ioloop.IOLoop.current() self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest() self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop) self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() self._socket = self.context.socket(zmq.SUB) if self.opts['zmq_filtering']: # TODO: constants file for "broadcast" self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast') self._socket.setsockopt(zmq.SUBSCRIBE, self.hexid) else: self._socket.setsockopt(zmq.SUBSCRIBE, b'') self._socket.setsockopt(zmq.IDENTITY, salt.utils.to_bytes(self.opts['id'])) # TODO: cleanup all the socket opts stuff if hasattr(zmq, 'TCP_KEEPALIVE'): self._socket.setsockopt( zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] ) recon_delay = self.opts['recon_default'] if self.opts['recon_randomize']: recon_delay = randint(self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'] ) log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format( self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'], recon_delay) ) log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay)) self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay) if hasattr(zmq, 'RECONNECT_IVL_MAX'): log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format( self.opts['recon_default'] + self.opts['recon_max']) ) self._socket.setsockopt( zmq.RECONNECT_IVL_MAX, self.opts['recon_max'] ) if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 
'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self._socket.setsockopt(zmq.IPV4ONLY, 0) if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: self._monitor = ZeroMQSocketMonitor(self._socket) self._monitor.start_io_loop(self.io_loop) def destroy(self): if hasattr(self, '_monitor') and self._monitor is not None: self._monitor.stop() self._monitor = None if hasattr(self, '_stream'): # TODO: Optionally call stream.close() on newer pyzmq? Its broken on some self._stream.io_loop.remove_handler(self._stream.socket) self._stream.socket.close(0) elif hasattr(self, '_socket'): self._socket.close(0) if hasattr(self, 'context') and self.context.closed is False: self.context.term() def __del__(self): self.destroy() # TODO: this is the time to see if we are connected, maybe use the req channel to guess? @tornado.gen.coroutine def connect(self): if not self.auth.authenticated: yield self.auth.authenticate() self.publish_port = self.auth.creds['publish_port'] self._socket.connect(self.master_pub) @property def master_pub(self): ''' Return the master publish port ''' return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'], port=self.publish_port) @tornado.gen.coroutine def _decode_messages(self, messages): ''' Take the zmq messages, decrypt/decode them into a payload :param list messages: A list of messages to be decoded ''' messages_len = len(messages) # if it was one message, then its old style if messages_len == 1: payload = self.serial.loads(messages[0]) # 2 includes a header which says who should do it elif messages_len == 2: if messages[0] not in ('broadcast', self.hexid): log.debug('Publish received for not this minion: {0}'.format(messages[0])) raise tornado.gen.Return(None) payload = self.serial.loads(messages[1]) else: raise Exception(('Invalid number of messages ({0}) in zeromq pub' 'message from master').format(len(messages_len))) # Yield control back to the caller. 
When the payload has been decoded, assign # the decoded payload to 'ret' and resume operation ret = yield self._decode_payload(payload) raise tornado.gen.Return(ret) @property def stream(self): ''' Return the current zmqstream, creating one if necessary ''' if not hasattr(self, '_stream'): self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) return self._stream def on_recv(self, callback): ''' Register a callback for received messages (that we didn't initiate) :param func callback: A function which should be called when data is received ''' if callback is None: return self.stream.on_recv(None) @tornado.gen.coroutine def wrap_callback(messages): payload = yield self._decode_messages(messages) if payload is not None: callback(payload) return self.stream.on_recv(wrap_callback) class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel): def __init__(self, opts): salt.transport.server.ReqServerChannel.__init__(self, opts) self._closing = False def zmq_device(self): ''' Multiprocessing target for the zmq queue device ''' self.__setup_signals() salt.utils.appendproctitle('MWorkerQueue') self.context = zmq.Context(self.opts['worker_threads']) # Prepare the zeromq sockets self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts) self.clients = self.context.socket(zmq.ROUTER) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self.clients.setsockopt(zmq.IPV4ONLY, 0) self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: # Socket monitor shall be used the only for debug purposes so using threading doesn't look too bad here import threading self._monitor = ZeroMQSocketMonitor(self.clients) t = threading.Thread(target=self._monitor.start_poll) t.start() self.workers = self.context.socket(zmq.DEALER) if self.opts.get('ipc_mode', '') == 'tcp': self.w_uri = 
'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_workers', 4515) ) else: self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Setting up the master communication server') self.clients.bind(self.uri) self.workers.bind(self.w_uri) while True: if self.clients.closed or self.workers.closed: break try: zmq.device(zmq.QUEUE, self.clients, self.workers) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except (KeyboardInterrupt, SystemExit): break def close(self): ''' Cleanly shutdown the router socket ''' if self._closing: return log.info('MWorkerQueue under PID %s is closing', os.getpid()) self._closing = True if hasattr(self, '_monitor') and self._monitor is not None: self._monitor.stop() self._monitor = None if hasattr(self, '_w_monitor') and self._w_monitor is not None: self._w_monitor.stop() self._w_monitor = None if hasattr(self, 'clients') and self.clients.closed is False: self.clients.close() if hasattr(self, 'workers') and self.workers.closed is False: self.workers.close() if hasattr(self, 'stream'): self.stream.close() if hasattr(self, '_socket') and self._socket.closed is False: self._socket.close() if hasattr(self, 'context') and self.context.closed is False: self.context.term() def pre_fork(self, process_manager): ''' Pre-fork we need to create the zmq router device :param func process_manager: An instance of salt.utils.process.ProcessManager ''' salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager) process_manager.add_process(self.zmq_device) def post_fork(self, payload_handler, io_loop): ''' After forking we need to create all of the local sockets to listen to the router :param func payload_handler: A function to called to handle incoming payloads as they are picked up off the wire :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling ''' self.payload_handler = payload_handler self.io_loop = io_loop self.context = zmq.Context(1) 
self._socket = self.context.socket(zmq.REP) if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: # Socket monitor shall be used the only for debug purposes so using threading doesn't look too bad here import threading self._w_monitor = ZeroMQSocketMonitor(self._socket) t = threading.Thread(target=self._w_monitor.start_poll) t.start() if self.opts.get('ipc_mode', '') == 'tcp': self.w_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_workers', 4515) ) else: self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Worker binding to socket {0}'.format(self.w_uri)) self._socket.connect(self.w_uri) salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop) self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) self.stream.on_recv_stream(self.handle_message) @tornado.gen.coroutine def handle_message(self, stream, payload): ''' Handle incoming messages from underylying TCP streams :stream ZMQStream stream: A ZeroMQ stream. See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html :param dict payload: A payload to process ''' try: payload = self.serial.loads(payload[0]) payload = self._decode_payload(payload) except Exception as exc: exc_type = type(exc).__name__ if exc_type == 'AuthenticationError': log.debug( 'Minion failed to auth to master. Since the payload is ' 'encrypted, it is not known which minion failed to ' 'authenticate. It is likely that this is a transient ' 'failure due to the master rotating its public key.' ) else: log.error('Bad load from minion: %s: %s', exc_type, exc) stream.send(self.serial.dumps('bad load')) raise tornado.gen.Return() # TODO helper functions to normalize payload? if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict): log.error('payload and load must be a dict. 
Payload was: {0} and load was {1}'.format(payload, payload.get('load'))) stream.send(self.serial.dumps('payload and load must be a dict')) raise tornado.gen.Return() # intercept the "_auth" commands, since the main daemon shouldn't know # anything about our key auth if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth': stream.send(self.serial.dumps(self._auth(payload['load']))) raise tornado.gen.Return() # TODO: test try: # Take the payload_handler function that was registered when we created the channel # and call it, returning control to the caller until it completes ret, req_opts = yield self.payload_handler(payload) except Exception as e: # always attempt to return an error to the minion stream.send('Some exception handling minion payload') log.error('Some exception handling a payload from minion', exc_info=True) raise tornado.gen.Return() req_fun = req_opts.get('fun', 'send') if req_fun == 'send_clear': stream.send(self.serial.dumps(ret)) elif req_fun == 'send': stream.send(self.serial.dumps(self.crypticle.dumps(ret))) elif req_fun == 'send_private': stream.send(self.serial.dumps(self._encrypt_private(ret, req_opts['key'], req_opts['tgt'], ))) else: log.error('Unknown req_fun {0}'.format(req_fun)) # always attempt to return an error to the minion stream.send('Server-side exception handling payload') raise tornado.gen.Return() def __setup_signals(self): signal.signal(signal.SIGINT, self._handle_signals) signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): msg = '{0} received a '.format(self.__class__.__name__) if signum == signal.SIGINT: msg += 'SIGINT' elif signum == signal.SIGTERM: msg += 'SIGTERM' msg += '. Exiting' log.debug(msg) self.close() sys.exit(salt.defaults.exitcodes.EX_OK) def _set_tcp_keepalive(zmq_socket, opts): ''' Ensure that TCP keepalives are set as specified in "opts". 
Warning: Failure to set TCP keepalives on the salt-master can result in not detecting the loss of a minion when the connection is lost or when it's host has been terminated without first closing the socket. Salt's Presence System depends on this connection status to know if a minion is "present". Warning: Failure to set TCP keepalives on minions can result in frequent or unexpected disconnects! ''' if hasattr(zmq, 'TCP_KEEPALIVE') and opts: if 'tcp_keepalive' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE, opts['tcp_keepalive'] ) if 'tcp_keepalive_idle' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle'] ) if 'tcp_keepalive_cnt' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt'] ) if 'tcp_keepalive_intvl' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl'] ) class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel): ''' Encapsulate synchronous operations for a publisher channel ''' def __init__(self, opts): self.opts = opts self.serial = salt.payload.Serial(self.opts) # TODO: in init? self.ckminions = salt.utils.minions.CkMinions(self.opts) def connect(self): return tornado.gen.sleep(5) def _publish_daemon(self): ''' Bind to the interface specified in the configuration file ''' salt.utils.appendproctitle(self.__class__.__name__) # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) _set_tcp_keepalive(pub_sock, self.opts) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: # Set the High Water Marks. 
For more information on HWM, see: # http://api.zeromq.org/4-1:zmq-setsockopt pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000)) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) salt.utils.zeromq.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info('Starting the Salt Publisher on {0}'.format(pub_uri)) pub_sock.bind(pub_uri) # Securely create socket log.info('Starting the Salt Puller on {0}'.format(pull_uri)) old_umask = os.umask(0o177) try: pull_sock.bind(pull_uri) finally: os.umask(old_umask) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: package = pull_sock.recv() unpacked_package = salt.payload.unpackage(package) if six.PY3: unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package) payload = unpacked_package['payload'] if self.opts['zmq_filtering']: # if you have a specific topic list, use that if 'topic_lst' in unpacked_package: for topic in unpacked_package['topic_lst']: # zmq filters are substring match, hash the topic # to avoid collisions htopic = hashlib.sha1(topic).hexdigest() pub_sock.send(htopic, flags=zmq.SNDMORE) pub_sock.send(payload) # otherwise its a broadcast else: # TODO: constants file for "broadcast" pub_sock.send('broadcast', flags=zmq.SNDMORE) pub_sock.send(payload) else: pub_sock.send(payload) except zmq.ZMQError as 
exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: # Cleanly close the sockets if we're shutting down if pub_sock.closed is False: pub_sock.setsockopt(zmq.LINGER, 1) pub_sock.close() if pull_sock.closed is False: pull_sock.setsockopt(zmq.LINGER, 1) pull_sock.close() if context.closed is False: context.term() def pre_fork(self, process_manager): ''' Do anything necessary pre-fork. Since this is on the master side this will primarily be used to create IPC channels and create our daemon process to do the actual publishing :param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager ''' process_manager.add_process(self._publish_daemon) def publish(self, load): ''' Publish "load" to minions :param dict load: A load to be sent across the wire to minions ''' payload = {'enc': 'aes'} crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value) payload['load'] = crypticle.dumps(load) if self.opts['sign_pub_messages']: master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem') log.debug("Signing data packet") payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load']) # Send 0MQ to the publisher context = zmq.Context(1) pub_sock = context.socket(zmq.PUSH) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) pub_sock.connect(pull_uri) int_payload = {'payload': self.serial.dumps(payload)} # add some targeting stuff for lists only (for now) if load['tgt_type'] == 'list': int_payload['topic_lst'] = load['tgt'] # If zmq_filtering is enabled, target matching has to happen master side match_targets = ["pcre", "glob", "list"] if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets: # Fetch a list of minions that match match_ids = self.ckminions.check_minions(load['tgt'], 
tgt_type=load['tgt_type']) log.debug("Publish Side Match: {0}".format(match_ids)) # Send list of miions thru so zmq can target them int_payload['topic_lst'] = match_ids pub_sock.send(self.serial.dumps(int_payload)) pub_sock.close() context.term() class AsyncReqMessageClientPool(salt.transport.MessageClientPool): ''' Wrapper class of AsyncReqMessageClientPool to avoid blocking waiting while writing data to socket. ''' def __init__(self, opts, args=None, kwargs=None): super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs) def __del__(self): self.destroy() def destroy(self): for message_client in self.message_clients: message_client.destroy() self.message_clients = [] def send(self, *args, **kwargs): message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue)) return message_clients[0].send(*args, **kwargs) # TODO: unit tests! class AsyncReqMessageClient(object): ''' This class wraps the underylying zeromq REQ socket and gives a future-based interface to sending and recieving messages. This works around the primary limitation of serialized send/recv on the underlying socket by queueing the message sends in this class. In the future if we decide to attempt to multiplex we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial ''' def __init__(self, opts, addr, linger=0, io_loop=None): ''' Create an asynchronous message client :param dict opts: The salt opts dictionary :param str addr: The interface IP address to bind to :param int linger: The number of seconds to linger on a ZMQ socket. 
See http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER] :param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop] ''' self.opts = opts self.addr = addr self.linger = linger if io_loop is None: zmq.eventloop.ioloop.install() tornado.ioloop.IOLoop.current() else: self.io_loop = io_loop self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() # wire up sockets self._init_socket() self.send_queue = [] # mapping of message -> future self.send_future_map = {} self.send_timeout_map = {} # message -> timeout # TODO: timeout all in-flight sessions, or error def destroy(self): if hasattr(self, 'stream') and self.stream is not None: # TODO: Optionally call stream.close() on newer pyzmq? It is broken on some. if self.stream.socket: self.stream.socket.close() self.stream.io_loop.remove_handler(self.stream.socket) # set this to None, more hacks for messed up pyzmq self.stream.socket = None self.stream = None self.socket.close() if self.context.closed is False: self.context.term() def __del__(self): self.destroy() def _init_socket(self): if hasattr(self, 'stream'): self.stream.close() # pylint: disable=E0203 self.socket.close() # pylint: disable=E0203 del self.stream del self.socket self.socket = self.context.socket(zmq.REQ) # socket options if hasattr(zmq, 'RECONNECT_IVL_MAX'): self.socket.setsockopt( zmq.RECONNECT_IVL_MAX, 5000 ) _set_tcp_keepalive(self.socket, self.opts) if self.addr.startswith('tcp://['): # Hint PF type if bracket enclosed IPv6 address if hasattr(zmq, 'IPV6'): self.socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, 'IPV4ONLY'): self.socket.setsockopt(zmq.IPV4ONLY, 0) self.socket.linger = self.linger self.socket.connect(self.addr) self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop) @tornado.gen.coroutine def _internal_send_recv(self): while len(self.send_queue) > 0: message = self.send_queue[0] future = self.send_future_map.get(message, None) if future is None: # Timedout del 
self.send_queue[0] continue # send def mark_future(msg): if not future.done(): data = self.serial.loads(msg[0]) future.set_result(data) self.stream.on_recv(mark_future) self.stream.send(message) try: ret = yield future except: # pylint: disable=W0702 self._init_socket() # re-init the zmq socket (no other way in zmq) del self.send_queue[0] continue del self.send_queue[0] self.send_future_map.pop(message, None) self.remove_message_timeout(message) def remove_message_timeout(self, message): if message not in self.send_timeout_map: return timeout = self.send_timeout_map.pop(message, None) if timeout is not None: # Hasn't been already timedout self.io_loop.remove_timeout(timeout) def timeout_message(self, message): ''' Handle a message timeout by removing it from the sending queue and informing the caller :raises: SaltReqTimeoutError ''' future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.debug('SaltReqTimeoutError, retrying. 
({0}/{1})'.format(future.attempts, future.tries)) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError('Message timed out')) def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False): ''' Return a future which will be completed when the message has a response ''' if future is None: future = tornado.concurrent.Future() future.tries = tries future.attempts = 0 future.timeout = timeout # if a future wasn't passed in, we need to serialize the message message = self.serial.dumps(message) if callback is not None: def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) # Add this future to the mapping self.send_future_map[message] = future if self.opts.get('detect_mode') is True: timeout = 1 if timeout is not None: send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message) self.send_timeout_map[message] = send_timeout if len(self.send_queue) == 0: self.io_loop.spawn_callback(self._internal_send_recv) self.send_queue.append(message) return future class ZeroMQSocketMonitor(object): __EVENT_MAP = None def __init__(self, socket): ''' Create ZMQ monitor sockets More information: http://api.zeromq.org/4-0:zmq-socket-monitor ''' self._socket = socket self._monitor_socket = self._socket.get_monitor_socket() self._monitor_stream = None def start_io_loop(self, io_loop): log.trace("Event monitor start!") self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop) self._monitor_stream.on_recv(self.monitor_callback) def start_poll(self): log.trace("Event monitor start!") try: while self._monitor_socket is not None and self._monitor_socket.poll(): msg = self._monitor_socket.recv_multipart() self.monitor_callback(msg) except (AttributeError, zmq.error.ContextTerminated): # We cannot log here because we'll get an interrupted system call in trying # 
to flush the logging buffer as we terminate pass @property def event_map(self): if ZeroMQSocketMonitor.__EVENT_MAP is None: event_map = {} for name in dir(zmq): if name.startswith('EVENT_'): value = getattr(zmq, name) event_map[value] = name ZeroMQSocketMonitor.__EVENT_MAP = event_map return ZeroMQSocketMonitor.__EVENT_MAP def monitor_callback(self, msg): evt = zmq.utils.monitor.parse_monitor_message(msg) evt['description'] = self.event_map[evt['event']] log.debug("ZeroMQ event: {0}".format(evt)) if evt['event'] == zmq.EVENT_MONITOR_STOPPED: self.stop() def stop(self): if self._socket is None: return self._socket.disable_monitor() self._socket = None self._monitor_socket = None if self._monitor_stream is not None: self._monitor_stream.close() self._monitor_stream = None log.trace("Event monitor done!")
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2816_2
crossvul-python_data_good_1739_0
"""Serve files directly from the ContentsManager.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import os import mimetypes import json import base64 from tornado import web from IPython.html.base.handlers import IPythonHandler class FilesHandler(IPythonHandler): """serve files via ContentsManager""" @web.authenticated def get(self, path): cm = self.contents_manager if cm.is_hidden(path): self.log.info("Refusing to serve hidden file, via 404 Error") raise web.HTTPError(404) path = path.strip('/') if '/' in path: _, name = path.rsplit('/', 1) else: name = path model = cm.get(path, type='file') if self.get_argument("download", False): self.set_header('Content-Disposition','attachment; filename="%s"' % name) # get mimetype from filename if name.endswith('.ipynb'): self.set_header('Content-Type', 'application/json') else: cur_mime = mimetypes.guess_type(name)[0] if cur_mime is not None: self.set_header('Content-Type', cur_mime) else: if model['format'] == 'base64': self.set_header('Content-Type', 'application/octet-stream') else: self.set_header('Content-Type', 'text/plain') if model['format'] == 'base64': b64_bytes = model['content'].encode('ascii') self.write(base64.decodestring(b64_bytes)) elif model['format'] == 'json': self.write(json.dumps(model['content'])) else: self.write(model['content']) self.flush() default_handlers = [ (r"/files/(.*)", FilesHandler), ]
./CrossVul/dataset_final_sorted/CWE-20/py/good_1739_0
crossvul-python_data_bad_50_6
# -*- coding: utf-8 -*- # # http://www.privacyidea.org # (c) cornelius kölbel, privacyidea.org # # 2018-01-22 Cornelius Kölbel <cornelius.koelbel@netknights.it> # Add offline refill # 2016-12-20 Cornelius Kölbel <cornelius.koelbel@netknights.it> # Add triggerchallenge endpoint # 2016-10-23 Cornelius Kölbel <cornelius.koelbel@netknights.it> # Add subscription decorator # 2016-09-05 Cornelius Kölbel <cornelius.koelbel@netknights.it> # SAML attributes on fail # 2016-08-30 Cornelius Kölbel <cornelius.koelbel@netknights.it> # save client application type to database # 2016-08-09 Cornelius Kölbel <cornelius@privacyidea.org> # Add possiblity to check OTP only # 2015-11-19 Cornelius Kölbel <cornelius@privacyidea.org> # Add support for transaction_id to saml_check # 2015-06-17 Cornelius Kölbel <cornelius@privacyidea.org> # Add policy decorator for API key requirement # 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org> # Complete rewrite during flask migration # Try to provide REST API # # This code is free software; you can redistribute it and/or # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE # License as published by the Free Software Foundation; either # version 3 of the License, or any later version. # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU AFFERO GENERAL PUBLIC LICENSE for more details. # # You should have received a copy of the GNU Affero General Public # License along with this program. If not, see <http://www.gnu.org/licenses/>. # __doc__ = """This module contains the REST API for doing authentication. The methods are tested in the file tests/test_api_validate.py Authentication is either done by providing a username and a password or a serial number and a password. 
**Authentication workflow** Authentication workflow is like this: In case of authenticating a user: * :func:`privacyidea.lib.token.check_user_pass` * :func:`privacyidea.lib.token.check_token_list` * :func:`privacyidea.lib.tokenclass.TokenClass.authenticate` * :func:`privacyidea.lib.tokenclass.TokenClass.check_pin` * :func:`privacyidea.lib.tokenclass.TokenClass.check_otp` In case if authenitcating a serial number: * :func:`privacyidea.lib.token.check_serial_pass` * :func:`privacyidea.lib.token.check_token_list` * :func:`privacyidea.lib.tokenclass.TokenClass.authenticate` * :func:`privacyidea.lib.tokenclass.TokenClass.check_pin` * :func:`privacyidea.lib.tokenclass.TokenClass.check_otp` """ from flask import (Blueprint, request, g, current_app) from privacyidea.lib.user import get_user_from_param from .lib.utils import send_result, getParam from ..lib.decorators import (check_user_or_serial_in_request) from .lib.utils import required from privacyidea.lib.error import ParameterError from privacyidea.lib.token import (check_user_pass, check_serial_pass, check_otp) from privacyidea.api.lib.utils import get_all_params from privacyidea.lib.config import (return_saml_attributes, get_from_config, return_saml_attributes_on_fail, SYSCONF) from privacyidea.lib.audit import getAudit from privacyidea.api.lib.prepolicy import (prepolicy, set_realm, api_key_required, mangle, save_client_application_type, check_base_action) from privacyidea.api.lib.postpolicy import (postpolicy, check_tokentype, check_serial, check_tokeninfo, no_detail_on_fail, no_detail_on_success, autoassign, offline_info, add_user_detail_to_response, construct_radius_response) from privacyidea.lib.policy import PolicyClass from privacyidea.lib.config import ConfigClass from privacyidea.lib.event import EventConfiguration import logging from privacyidea.api.lib.postpolicy import postrequest, sign_response from privacyidea.api.auth import jwtauth from privacyidea.api.register import register_blueprint from 
privacyidea.api.recover import recover_blueprint from privacyidea.lib.utils import get_client_ip from privacyidea.lib.event import event from privacyidea.lib.subscriptions import CheckSubscription from privacyidea.api.auth import admin_required from privacyidea.lib.policy import ACTION from privacyidea.lib.token import get_tokens from privacyidea.lib.machine import list_token_machines from privacyidea.lib.applications.offline import MachineApplication import json log = logging.getLogger(__name__) validate_blueprint = Blueprint('validate_blueprint', __name__) @validate_blueprint.before_request @register_blueprint.before_request @recover_blueprint.before_request def before_request(): """ This is executed before the request """ g.config_object = ConfigClass() request.all_data = get_all_params(request.values, request.data) request.User = get_user_from_param(request.all_data) privacyidea_server = current_app.config.get("PI_AUDIT_SERVERNAME") or \ request.host # Create a policy_object, that reads the database audit settings # and contains the complete policy definition during the request. # This audit_object can be used in the postpolicy and prepolicy and it # can be passed to the innerpolicies. g.policy_object = PolicyClass() g.audit_object = getAudit(current_app.config) g.event_config = EventConfiguration() # access_route contains the ip addresses of all clients, hops and proxies. 
g.client_ip = get_client_ip(request, get_from_config(SYSCONF.OVERRIDECLIENT)) g.audit_object.log({"success": False, "action_detail": "", "client": g.client_ip, "client_user_agent": request.user_agent.browser, "privacyidea_server": privacyidea_server, "action": "{0!s} {1!s}".format(request.method, request.url_rule), "info": ""}) @validate_blueprint.after_request @register_blueprint.after_request @recover_blueprint.after_request @jwtauth.after_request @postrequest(sign_response, request=request) def after_request(response): """ This function is called after a request :return: The response """ # In certain error cases the before_request was not handled # completely so that we do not have an audit_object if "audit_object" in g: g.audit_object.finalize_log() # No caching! response.headers['Cache-Control'] = 'no-cache' return response @validate_blueprint.route('/offlinerefill', methods=['POST']) @event("validate_offlinerefill", request, g) def offlinerefill(): """ This endpoint allows to fetch new offline OTP values for a token, that is already offline. According to the definition it will send the missing OTP values, so that the client will have as much otp values as defined. :param serial: The serial number of the token, that should be refilled. :param refilltoken: The authorization token, that allows refilling. 
:param pass: the last password (maybe password+OTP) entered by the user :return: """ result = False otps = {} serial = getParam(request.all_data, "serial", required) refilltoken = getParam(request.all_data, "refilltoken", required) password = getParam(request.all_data, "pass", required) tokenobj_list = get_tokens(serial=serial) if len(tokenobj_list) != 1: raise ParameterError("The token does not exist") else: tokenobj = tokenobj_list[0] machine_defs = list_token_machines(serial) # check if is still an offline token: for mdef in machine_defs: if mdef.get("application") == "offline": # check refill token: if tokenobj.get_tokeninfo("refilltoken") == refilltoken: # refill otps = MachineApplication.get_refill(tokenobj, password, mdef.get("options")) refilltoken = MachineApplication.generate_new_refilltoken(tokenobj) response = send_result(True) content = json.loads(response.data) content["auth_items"] = {"offline": [{"refilltoken": refilltoken, "response": otps}]} response.data = json.dumps(content) return response raise ParameterError("Token is not an offline token or refill token is incorrect") @validate_blueprint.route('/check', methods=['POST', 'GET']) @validate_blueprint.route('/radiuscheck', methods=['POST', 'GET']) @postpolicy(construct_radius_response, request=request) @postpolicy(no_detail_on_fail, request=request) @postpolicy(no_detail_on_success, request=request) @postpolicy(add_user_detail_to_response, request=request) @postpolicy(offline_info, request=request) @postpolicy(check_tokeninfo, request=request) @postpolicy(check_tokentype, request=request) @postpolicy(check_serial, request=request) @postpolicy(autoassign, request=request) @prepolicy(set_realm, request=request) @prepolicy(mangle, request=request) @prepolicy(save_client_application_type, request=request) @check_user_or_serial_in_request(request) @CheckSubscription(request) @prepolicy(api_key_required, request=request) @event("validate_check", request, g) def check(): """ check the authentication 
for a user or a serial number. Either a ``serial`` or a ``user`` is required to authenticate. The PIN and OTP value is sent in the parameter ``pass``. In case of successful authentication it returns ``result->value: true``. In case of a challenge response authentication a parameter ``exception=1`` can be passed. This would result in a HTTP 500 Server Error response if an error occurred during sending of SMS or Email. In case ``/validate/radiuscheck`` is requested, the responses are modified as follows: A successful authentication returns an empty HTTP 204 response. An unsuccessful authentication returns an empty HTTP 400 response. Error responses are the same responses as for the ``/validate/check`` endpoint. :param serial: The serial number of the token, that tries to authenticate. :param user: The loginname/username of the user, who tries to authenticate. :param realm: The realm of the user, who tries to authenticate. If the realm is omitted, the user is looked up in the default realm. :param pass: The password, that consists of the OTP PIN and the OTP value. :param otponly: If set to 1, only the OTP value is verified. This is used in the management UI. Only used with the parameter serial. :param transaction_id: The transaction ID for a response to a challenge request :param state: The state ID for a response to a challenge request :return: a json result with a boolean "result": true **Example Validation Request**: .. sourcecode:: http POST /validate/check HTTP/1.1 Host: example.com Accept: application/json user=user realm=realm1 pass=s3cret123456 **Example response** for a successful authentication: .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json { "detail": { "message": "matching 1 tokens", "serial": "PISP0000AB00", "type": "spass" }, "id": 1, "jsonrpc": "2.0", "result": { "status": true, "value": true }, "version": "privacyIDEA unknown" } **Example response** for this first part of a challenge response authentication: .. 
sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json { "detail": { "serial": "PIEM0000AB00", "type": "email", "transaction_id": "12345678901234567890", "multi_challenge: [ {"serial": "PIEM0000AB00", "transaction_id": "12345678901234567890", "message": "Please enter otp from your email"}, {"serial": "PISM12345678", "transaction_id": "12345678901234567890", "message": "Please enter otp from your SMS"} ] }, "id": 1, "jsonrpc": "2.0", "result": { "status": true, "value": false }, "version": "privacyIDEA unknown" } In this example two challenges are triggered, one with an email and one with an SMS. The application and thus the user has to decide, which one to use. They can use either. .. note:: All challenge response tokens have the same transaction_id in this case. """ #user = get_user_from_param(request.all_data) user = request.User serial = getParam(request.all_data, "serial") password = getParam(request.all_data, "pass", required) otp_only = getParam(request.all_data, "otponly") options = {"g": g, "clientip": g.client_ip} # Add all params to the options for key, value in request.all_data.items(): if value and key not in ["g", "clientip"]: options[key] = value g.audit_object.log({"user": user.login, "resolver": user.resolver, "realm": user.realm}) if serial: if not otp_only: result, details = check_serial_pass(serial, password, options=options) else: result, details = check_otp(serial, password) else: result, details = check_user_pass(user, password, options=options) g.audit_object.log({"info": details.get("message"), "success": result, "serial": serial or details.get("serial"), "tokentype": details.get("type")}) return send_result(result, details=details) @validate_blueprint.route('/samlcheck', methods=['POST', 'GET']) @postpolicy(no_detail_on_fail, request=request) @postpolicy(no_detail_on_success, request=request) @postpolicy(add_user_detail_to_response, request=request) @postpolicy(check_tokeninfo, request=request) @postpolicy(check_tokentype, 
request=request) @postpolicy(check_serial, request=request) @postpolicy(autoassign, request=request) @prepolicy(set_realm, request=request) @prepolicy(mangle, request=request) @prepolicy(save_client_application_type, request=request) @check_user_or_serial_in_request(request) @CheckSubscription(request) @prepolicy(api_key_required, request=request) @event("validate_check", request, g) def samlcheck(): """ Authenticate the user and return the SAML user information. :param user: The loginname/username of the user, who tries to authenticate. :param realm: The realm of the user, who tries to authenticate. If the realm is omitted, the user is looked up in the default realm. :param pass: The password, that consists of the OTP PIN and the OTP value. :return: a json result with a boolean "result": true **Example response** for a successful authentication: .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json { "detail": { "message": "matching 1 tokens", "serial": "PISP0000AB00", "type": "spass" }, "id": 1, "jsonrpc": "2.0", "result": { "status": true, "value": {"attributes": { "username": "koelbel", "realm": "themis", "mobile": null, "phone": null, "myOwn": "/data/file/home/koelbel", "resolver": "themis", "surname": "Kölbel", "givenname": "Cornelius", "email": null}, "auth": true} }, "version": "privacyIDEA unknown" } The response in value->attributes can contain additional attributes (like "myOwn") which you can define in the LDAP resolver in the attribute mapping. 
""" user = get_user_from_param(request.all_data) password = getParam(request.all_data, "pass", required) options = {"g": g, "clientip": g.client_ip} # Add all params to the options for key, value in request.all_data.items(): if value and key not in ["g", "clientip"]: options[key] = value auth, details = check_user_pass(user, password, options=options) ui = user.info result_obj = {"auth": auth, "attributes": {}} if return_saml_attributes(): if auth or return_saml_attributes_on_fail(): # privacyIDEA's own attribute map result_obj["attributes"] = {"username": ui.get("username"), "realm": user.realm, "resolver": user.resolver, "email": ui.get("email"), "surname": ui.get("surname"), "givenname": ui.get("givenname"), "mobile": ui.get("mobile"), "phone": ui.get("phone") } # additional attributes for k, v in ui.iteritems(): result_obj["attributes"][k] = v g.audit_object.log({"info": details.get("message"), "success": auth, "serial": details.get("serial"), "tokentype": details.get("type"), "user": user.login, "resolver": user.resolver, "realm": user.realm}) return send_result(result_obj, details=details) @validate_blueprint.route('/triggerchallenge', methods=['POST', 'GET']) @admin_required @check_user_or_serial_in_request(request) @prepolicy(check_base_action, request, action=ACTION.TRIGGERCHALLENGE) @event("validate_triggerchallenge", request, g) def trigger_challenge(): """ An administrator can call this endpoint if he has the right of ``triggerchallenge`` (scope: admin). He can pass a ``user`` name and or a ``serial`` number. privacyIDEA will trigger challenges for all native challenges response tokens, possessed by this user or only for the given serial number. The request needs to contain a valid PI-Authorization header. :param user: The loginname/username of the user, who tries to authenticate. :param realm: The realm of the user, who tries to authenticate. If the realm is omitted, the user is looked up in the default realm. 
:param serial: The serial number of the token. :return: a json result with a "result" of the number of matching challenge response tokens **Example response** for a successful triggering of challenge: .. sourcecode:: http {"jsonrpc": "2.0", "signature": "1939...146964", "detail": {"transaction_ids": ["03921966357577766962"], "messages": ["Enter the OTP from the SMS:"], "threadid": 140422378276608}, "versionnumber": "unknown", "version": "privacyIDEA unknown", "result": {"status": true, "value": 1}, "time": 1482223663.517212, "id": 1} **Example response** for response, if the user has no challenge token: .. sourcecode:: http {"detail": {"messages": [], "threadid": 140031212377856, "transaction_ids": []}, "id": 1, "jsonrpc": "2.0", "result": {"status": true, "value": 0}, "signature": "205530282...54508", "time": 1484303812.346576, "version": "privacyIDEA 2.17", "versionnumber": "2.17"} **Example response** for a failed triggering of a challenge. In this case the ``status`` will be ``false``. .. 
sourcecode:: http {"detail": null, "id": 1, "jsonrpc": "2.0", "result": {"error": {"code": 905, "message": "ERR905: The user can not be found in any resolver in this realm!"}, "status": false}, "signature": "14468...081555", "time": 1484303933.72481, "version": "privacyIDEA 2.17"} """ user = request.User serial = getParam(request.all_data, "serial") result_obj = 0 details = {"messages": [], "transaction_ids": []} options = {"g": g, "clientip": g.client_ip, "user": user} token_objs = get_tokens(serial=serial, user=user, active=True, revoked=False, locked=False) for token_obj in token_objs: if "challenge" in token_obj.mode: # If this is a challenge response token, we create a challenge success, return_message, transactionid, attributes = \ token_obj.create_challenge(options=options) if attributes: details["attributes"] = attributes if success: result_obj += 1 details.get("transaction_ids").append(transactionid) # This will write only the serial of the token that was processed last to the audit log g.audit_object.log({ "serial": token_obj.token.serial, }) details.get("messages").append(return_message) g.audit_object.log({ "user": user.login, "resolver": user.resolver, "realm": user.realm, "success": result_obj > 0, "info": "triggered {0!s} challenges".format(result_obj), }) return send_result(result_obj, details=details)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_50_6
crossvul-python_data_good_1467_1
def load_module(name):
    '''Load a mock template module.

    If *name* is a path to an existing ``*.py`` file, execute that file and
    return the resulting module object; otherwise treat *name* as the name of
    a template shipped in the ``dbusmock.templates`` package and import it.

    Note: the file's contents are executed with exec(); templates are trusted
    code by design, so only load template files you control.
    '''
    if os.path.exists(name) and os.path.splitext(name)[1] == '.py':
        # Create an empty module named after the file and run the file's code
        # in its namespace.  types.ModuleType replaces the deprecated
        # imp.new_module() (the imp module was removed in Python 3.12).
        mod = types.ModuleType(os.path.splitext(os.path.basename(name))[0])
        with open(name) as f:
            exec(f.read(), mod.__dict__, mod.__dict__)
        return mod

    return importlib.import_module('dbusmock.templates.' + name)
''' def __init__(self, bus_name, path, interface, props, logfile=None, is_object_manager=False): '''Create a new DBusMockObject bus_name: A dbus.service.BusName instance where the object will be put on path: D-Bus object path interface: Primary D-Bus interface name of this object (where properties and methods will be put on) props: A property_name (string) → property (Variant) map with initial properties on "interface" logfile: When given, method calls will be logged into that file name; if None, logging will be written to stdout. Note that you can also query the called methods over D-BUS with GetCalls() and GetMethodCalls(). is_object_manager: If True, the GetManagedObjects method will automatically be implemented on the object, returning all objects which have this one’s path as a prefix of theirs. Note that the InterfacesAdded and InterfacesRemoved signals will not be automatically emitted. ''' dbus.service.Object.__init__(self, bus_name, path) self.bus_name = bus_name self.path = path self.interface = interface self.is_object_manager = is_object_manager self._template = None self._template_parameters = None if logfile: self.logfile = open(logfile, 'w') else: self.logfile = None self.is_logfile_owner = True self.call_log = [] if props is None: props = {} self._reset(props) def __del__(self): if self.logfile and self.is_logfile_owner: self.logfile.close() def _set_up_object_manager(self): '''Set up this mock object as a D-Bus ObjectManager.''' if self.path == '/': cond = 'k != \'/\'' else: cond = 'k.startswith(\'%s/\')' % self.path self.AddMethod(OBJECT_MANAGER_IFACE, 'GetManagedObjects', '', 'a{oa{sa{sv}}}', 'ret = {dbus.ObjectPath(k): objects[k].props ' + ' for k in objects.keys() if ' + cond + '}') def _reset(self, props): # interface -> name -> value self.props = {self.interface: props} # interface -> name -> (in_signature, out_signature, code, dbus_wrapper_fn) self.methods = {self.interface: {}} if self.is_object_manager: self._set_up_object_manager() 
@dbus.service.method(dbus.PROPERTIES_IFACE, in_signature='ss', out_signature='v') def Get(self, interface_name, property_name): '''Standard D-Bus API for getting a property value''' self.log('Get %s.%s' % (interface_name, property_name)) if not interface_name: interface_name = self.interface try: return self.GetAll(interface_name)[property_name] except KeyError: raise dbus.exceptions.DBusException( 'no such property ' + property_name, name=self.interface + '.UnknownProperty') @dbus.service.method(dbus.PROPERTIES_IFACE, in_signature='s', out_signature='a{sv}') def GetAll(self, interface_name, *args, **kwargs): '''Standard D-Bus API for getting all property values''' self.log('GetAll ' + interface_name) if not interface_name: interface_name = self.interface try: return self.props[interface_name] except KeyError: raise dbus.exceptions.DBusException( 'no such interface ' + interface_name, name=self.interface + '.UnknownInterface') @dbus.service.method(dbus.PROPERTIES_IFACE, in_signature='ssv', out_signature='') def Set(self, interface_name, property_name, value, *args, **kwargs): '''Standard D-Bus API for setting a property value''' self.log('Set %s.%s%s' % (interface_name, property_name, self.format_args((value,)))) try: iface_props = self.props[interface_name] except KeyError: raise dbus.exceptions.DBusException( 'no such interface ' + interface_name, name=self.interface + '.UnknownInterface') if property_name not in iface_props: raise dbus.exceptions.DBusException( 'no such property ' + property_name, name=self.interface + '.UnknownProperty') iface_props[property_name] = value self.EmitSignal('org.freedesktop.DBus.Properties', 'PropertiesChanged', 'sa{sv}as', [interface_name, dbus.Dictionary({property_name: value}, signature='sv'), dbus.Array([], signature='s') ]) @dbus.service.method(MOCK_IFACE, in_signature='ssa{sv}a(ssss)', out_signature='') def AddObject(self, path, interface, properties, methods): '''Add a new D-Bus object to the mock path: D-Bus object path 
interface: Primary D-Bus interface name of this object (where properties and methods will be put on) properties: A property_name (string) → value map with initial properties on "interface" methods: An array of 4-tuples (name, in_sig, out_sig, code) describing methods to add to "interface"; see AddMethod() for details of the tuple values If this is a D-Bus ObjectManager instance, the InterfacesAdded signal will *not* be emitted for the object automatically; it must be emitted manually if desired. This is because AddInterface may be called after AddObject, but before the InterfacesAdded signal should be emitted. Example: dbus_proxy.AddObject('/com/example/Foo/Manager', 'com.example.Foo.Control', { 'state': dbus.String('online', variant_level=1), }, [ ('Start', '', '', ''), ('EchoInt', 'i', 'i', 'ret = args[0]'), ('GetClients', '', 'ao', 'ret = ["/com/example/Foo/Client1"]'), ]) ''' if path in objects: raise dbus.exceptions.DBusException( 'object %s already exists' % path, name='org.freedesktop.DBus.Mock.NameError') obj = DBusMockObject(self.bus_name, path, interface, properties) # make sure created objects inherit the log file stream obj.logfile = self.logfile obj.is_logfile_owner = False obj.AddMethods(interface, methods) objects[path] = obj @dbus.service.method(MOCK_IFACE, in_signature='s', out_signature='') def RemoveObject(self, path): '''Remove a D-Bus object from the mock As with AddObject, this will *not* emit the InterfacesRemoved signal if it’s an ObjectManager instance. ''' try: objects[path].remove_from_connection() del objects[path] except KeyError: raise dbus.exceptions.DBusException( 'object %s does not exist' % path, name='org.freedesktop.DBus.Mock.NameError') @dbus.service.method(MOCK_IFACE, in_signature='', out_signature='') def Reset(self): '''Reset the mock object state. Remove all mock objects from the bus and tidy up so the state is as if python-dbusmock had just been restarted. 
If the mock object was originally created with a template (from the command line, the Python API or by calling AddTemplate over D-Bus), it will be re-instantiated with that template. ''' # Clear other existing objects. for obj_name, obj in objects.items(): if obj_name != self.path: obj.remove_from_connection() objects.clear() # Reinitialise our state. Carefully remove new methods from our dict; # they don't not actually exist if they are a statically defined # template function for method_name in self.methods[self.interface]: try: delattr(self.__class__, method_name) except AttributeError: pass self._reset({}) if self._template is not None: self.AddTemplate(self._template, self._template_parameters) objects[self.path] = self @dbus.service.method(MOCK_IFACE, in_signature='sssss', out_signature='') def AddMethod(self, interface, name, in_sig, out_sig, code): '''Add a method to this object interface: D-Bus interface to add this to. For convenience you can specify '' here to add the method to the object's main interface (as specified on construction). name: Name of the method in_sig: Signature of input arguments; for example "ias" for a method that takes an int32 and a string array as arguments; see http://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-signatures out_sig: Signature of output arguments; for example "s" for a method that returns a string; use '' for methods that do not return anything. code: Python 3 code to run in the method call; you have access to the arguments through the "args" list, and can set the return value by assigning a value to the "ret" variable. You can also read the global "objects" variable, which is a dictionary mapping object paths to DBusMockObject instances. For keeping state across method calls, you are free to use normal Python members of the "self" object, which will be persistant for the whole mock's life time. E. g. 
you can have a method with "self.my_state = True", and another method that returns it with "ret = self.my_state". When specifying '', the method will not do anything (except logging) and return None. ''' if not interface: interface = self.interface n_args = len(dbus.Signature(in_sig)) # we need to have separate methods for dbus-python, so clone # mock_method(); using message_keyword with this dynamic approach fails # because inspect cannot handle those, so pass on interface and method # name as first positional arguments method = lambda self, *args, **kwargs: DBusMockObject.mock_method( self, interface, name, in_sig, *args, **kwargs) # we cannot specify in_signature here, as that trips over a consistency # check in dbus-python; we need to set it manually instead dbus_method = dbus.service.method(interface, out_signature=out_sig)(method) dbus_method.__name__ = str(name) dbus_method._dbus_in_signature = in_sig dbus_method._dbus_args = ['arg%i' % i for i in range(1, n_args + 1)] # for convenience, add mocked methods on the primary interface as # callable methods if interface == self.interface: setattr(self.__class__, name, dbus_method) self.methods.setdefault(interface, {})[str(name)] = (in_sig, out_sig, code, dbus_method) @dbus.service.method(MOCK_IFACE, in_signature='sa(ssss)', out_signature='') def AddMethods(self, interface, methods): '''Add several methods to this object interface: D-Bus interface to add this to. For convenience you can specify '' here to add the method to the object's main interface (as specified on construction). methods: list of 4-tuples (name, in_sig, out_sig, code) describing one method each. See AddMethod() for details of the tuple values. ''' for method in methods: self.AddMethod(interface, *method) @dbus.service.method(MOCK_IFACE, in_signature='ssv', out_signature='') def AddProperty(self, interface, name, value): '''Add property to this object interface: D-Bus interface to add this to. 
For convenience you can specify '' here to add the property to the object's main interface (as specified on construction). name: Property name. value: Property value. ''' if not interface: interface = self.interface try: self.props[interface][name] raise dbus.exceptions.DBusException( 'property %s already exists' % name, name=self.interface + '.PropertyExists') except KeyError: # this is what we expect pass # copy.copy removes one level of variant-ness, which means that the # types get exported in introspection data correctly, but we can't do # this for container types. if not (isinstance(value, dbus.Dictionary) or isinstance(value, dbus.Array)): value = copy.copy(value) self.props.setdefault(interface, {})[name] = value @dbus.service.method(MOCK_IFACE, in_signature='sa{sv}', out_signature='') def AddProperties(self, interface, properties): '''Add several properties to this object interface: D-Bus interface to add this to. For convenience you can specify '' here to add the property to the object's main interface (as specified on construction). properties: A property_name (string) → value map ''' for k, v in properties.items(): self.AddProperty(interface, k, v) @dbus.service.method(MOCK_IFACE, in_signature='sa{sv}', out_signature='') def AddTemplate(self, template, parameters): '''Load a template into the mock. python-dbusmock ships a set of standard mocks for common system services such as UPower and NetworkManager. With these the actual tests become a lot simpler, as they only have to set up the particular properties for the tests, and not the skeleton of common properties, interfaces, and methods. template: Name of the template to load or the full path to a *.py file for custom templates. See "pydoc dbusmock.templates" for a list of available templates from python-dbusmock package, and "pydoc dbusmock.templates.NAME" for documentation about template NAME. parameters: A parameter (string) → value (variant) map, for parameterizing templates. 
Each template can define their own, see documentation of that particular template for details. ''' try: module = load_module(template) except ImportError as e: raise dbus.exceptions.DBusException('Cannot add template %s: %s' % (template, str(e)), name='org.freedesktop.DBus.Mock.TemplateError') # If the template specifies this is an ObjectManager, set that up if hasattr(module, 'IS_OBJECT_MANAGER') and module.IS_OBJECT_MANAGER: self._set_up_object_manager() # pick out all D-BUS service methods and add them to our interface for symbol in dir(module): fn = getattr(module, symbol) if ('_dbus_interface' in dir(fn) and ('_dbus_is_signal' not in dir(fn) or not fn._dbus_is_signal)): # for dbus-python compatibility, add methods as callables setattr(self.__class__, symbol, fn) self.methods.setdefault(fn._dbus_interface, {})[str(symbol)] = ( fn._dbus_in_signature, fn._dbus_out_signature, '', fn ) if parameters is None: parameters = {} module.load(self, parameters) # save the given template and parameters for re-instantiation on # Reset() self._template = template self._template_parameters = parameters @dbus.service.method(MOCK_IFACE, in_signature='sssav', out_signature='') def EmitSignal(self, interface, name, signature, args): '''Emit a signal from the object. interface: D-Bus interface to send the signal from. For convenience you can specify '' here to add the method to the object's main interface (as specified on construction). 
name: Name of the signal signature: Signature of input arguments; for example "ias" for a signal that takes an int32 and a string array as arguments; see http://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-signatures args: variant array with signal arguments; must match order and type in "signature" ''' if not interface: interface = self.interface # convert types of arguments according to signature, using # MethodCallMessage.append(); this will also provide type/length # checks, except for the case of an empty signature if signature == '' and len(args) > 0: raise TypeError('Fewer items found in D-Bus signature than in Python arguments') m = dbus.connection.MethodCallMessage('a.b', '/a', 'a.b', 'a') m.append(signature=signature, *args) args = m.get_args_list() fn = lambda self, *args: self.log('emit %s.%s%s' % (interface, name, self.format_args(args))) fn.__name__ = str(name) dbus_fn = dbus.service.signal(interface)(fn) dbus_fn._dbus_signature = signature dbus_fn._dbus_args = ['arg%i' % i for i in range(1, len(args) + 1)] dbus_fn(self, *args) @dbus.service.method(MOCK_IFACE, in_signature='', out_signature='a(tsav)') def GetCalls(self): '''List all the logged calls since the last call to ClearCalls(). Return a list of (timestamp, method_name, args_list) tuples. ''' return self.call_log @dbus.service.method(MOCK_IFACE, in_signature='s', out_signature='a(tav)') def GetMethodCalls(self, method): '''List all the logged calls of a particular method. Return a list of (timestamp, args_list) tuples. ''' return [(row[0], row[2]) for row in self.call_log if row[1] == method] @dbus.service.method(MOCK_IFACE, in_signature='', out_signature='') def ClearCalls(self): '''Empty the log of mock call signatures.''' self.call_log = [] @dbus.service.signal(MOCK_IFACE, signature='sav') def MethodCalled(self, name, args): '''Signal emitted for every called mock method. This is emitted for all mock method calls. 
    def mock_method(self, interface, dbus_method, in_signature, *args, **kwargs):
        '''Master mock method.

        This gets "instantiated" in AddMethod(). Execute the code snippet of
        the method and return the "ret" variable if it was set.

        Every call is logged, appended to call_log, and announced via the
        MethodCalled signal before the method's code runs.
        '''
        # print('mock_method', dbus_method, self, in_signature, args, kwargs, file=sys.stderr)

        # convert types of arguments according to signature, using
        # MethodCallMessage.append(); this will also provide type/length
        # checks, except for the case of an empty signature
        if in_signature == '' and len(args) > 0:
            raise TypeError('Fewer items found in D-Bus signature than in Python arguments')
        # The message's destination/path/interface/member values are dummies;
        # the message is only used for its argument-marshalling side effect.
        m = dbus.connection.MethodCallMessage('a.b', '/a', 'a.b', 'a')
        m.append(signature=in_signature, *args)
        args = m.get_args_list()

        self.log(dbus_method + self.format_args(args))
        self.call_log.append((int(time.time()), str(dbus_method), args))
        self.MethodCalled(dbus_method, args)

        # The code may be a Python 3 string to interpret, or may be a function
        # object (if AddMethod was called from within Python itself, rather than
        # over D-Bus).
        code = self.methods[interface][dbus_method][2]
        if code and isinstance(code, types.FunctionType):
            return code(self, *args)
        elif code:
            # exec() of caller-supplied code is this tool's core feature: mocks
            # are configured by trusted test code, over a test-owned bus.
            loc = locals().copy()
            exec(code, globals(), loc)
            if 'ret' in loc:
                return loc['ret']
# Overwrite dbus-python's _method_lookup(), as that offers no way to have the
# same method name on different interfaces
orig_method_lookup = dbus.service._method_lookup


def _dbusmock_method_lookup(obj, method_name, dbus_interface):
    # Consult the mock's per-interface method table first; fall back to
    # dbus-python's own lookup for non-mocked methods (e.g. Introspect).
    try:
        m = obj.methods[dbus_interface or obj.interface][method_name]
        return (m[3], m[3])
    except KeyError:
        return orig_method_lookup(obj, method_name, dbus_interface)


dbus.service._method_lookup = _dbusmock_method_lookup


#
# Helper API for templates
#

def get_objects():
    '''Return all existing object paths.

    Note: this is a live dictionary view; it reflects objects added or
    removed after the call.
    '''
    return objects.keys()


def get_object(path):
    '''Return the mock object registered at a given object path.

    Raises KeyError if no object exists at *path*.
    '''
    return objects[path]
./CrossVul/dataset_final_sorted/CWE-20/py/good_1467_1
crossvul-python_data_good_1232_1
### # Copyright (c) 2002-2004, Jeremiah Fincher # Copyright (c) 2008-2009, James McCoy # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
### from __future__ import division import re import math import cmath import types import string import supybot.utils as utils from supybot.commands import * import supybot.utils.minisix as minisix import supybot.callbacks as callbacks from supybot.i18n import PluginInternationalization, internationalizeDocstring _ = PluginInternationalization('Math') from .local import convertcore from .evaluator import safe_eval, InvalidNode, SAFE_ENV baseArg = ('int', 'base', lambda i: i <= 36) class Math(callbacks.Plugin): """Provides commands to work with math, such as a calculator and a unit converter.""" @internationalizeDocstring def base(self, irc, msg, args, frm, to, number): """<fromBase> [<toBase>] <number> Converts <number> from base <fromBase> to base <toBase>. If <toBase> is left out, it converts to decimal. """ if not number: number = str(to) to = 10 try: irc.reply(self._convertBaseToBase(number, to, frm)) except ValueError: irc.error(_('Invalid <number> for base %s: %s') % (frm, number)) base = wrap(base, [('int', 'base', lambda i: 2 <= i <= 36), optional(('int', 'base', lambda i: 2 <= i <= 36), 10), additional('something')]) def _convertDecimalToBase(self, number, base): """Convert a decimal number to another base; returns a string.""" if number == 0: return '0' elif number < 0: negative = True number = -number else: negative = False digits = [] while number != 0: digit = number % base if digit >= 10: digit = string.ascii_uppercase[digit - 10] else: digit = str(digit) digits.append(digit) number = number // base digits.reverse() return '-'*negative + ''.join(digits) def _convertBaseToBase(self, number, toBase, fromBase): """Convert a number from any base, 2 through 36, to any other base, 2 through 36. 
Returns a string.""" number = minisix.long(str(number), fromBase) if toBase == 10: return str(number) return self._convertDecimalToBase(number, toBase) def _floatToString(self, x): if -1e-10 < x < 1e-10: return '0' elif -1e-10 < int(x) - x < 1e-10: return str(int(x)) else: return str(x) def _complexToString(self, x): realS = self._floatToString(x.real) imagS = self._floatToString(x.imag) if imagS == '0': return realS elif imagS == '1': imagS = '+i' elif imagS == '-1': imagS = '-i' elif x.imag < 0: imagS = '%si' % imagS else: imagS = '+%si' % imagS if realS == '0' and imagS == '0': return '0' elif realS == '0': return imagS.lstrip('+') elif imagS == '0': return realS else: return '%s%s' % (realS, imagS) @internationalizeDocstring def calc(self, irc, msg, args, text): """<math expression> Returns the value of the evaluated <math expression>. The syntax is Python syntax; the type of arithmetic is floating point. Floating point arithmetic is used in order to prevent a user from being able to crash to the bot with something like '10**10**10**10'. One consequence is that large values such as '10**24' might not be exact. """ try: self.log.info('evaluating %q from %s', text, msg.prefix) x = complex(safe_eval(text, allow_ints=False)) irc.reply(self._complexToString(x)) except OverflowError: maxFloat = math.ldexp(0.9999999999999999, 1024) irc.error(_('The answer exceeded %s or so.') % maxFloat) except InvalidNode as e: irc.error(_('Invalid syntax: %s') % e.args[0]) except NameError as e: irc.error(_('%s is not a defined function.') % e.args[0]) except Exception as e: irc.error(str(e)) calc = wrap(calc, ['text']) @internationalizeDocstring def icalc(self, irc, msg, args, text): """<math expression> This is the same as the calc command except that it allows integer math, and can thus cause the bot to suck up CPU. Hence it requires the 'trusted' capability to use. 
""" try: self.log.info('evaluating %q from %s', text, msg.prefix) x = safe_eval(text, allow_ints=True) irc.reply(str(x)) except OverflowError: maxFloat = math.ldexp(0.9999999999999999, 1024) irc.error(_('The answer exceeded %s or so.') % maxFloat) except InvalidNode as e: irc.error(_('Invalid syntax: %s') % e.args[0]) except NameError as e: irc.error(_('%s is not a defined function.') % str(e).split()[1]) except Exception as e: irc.error(utils.exnToString(e)) icalc = wrap(icalc, [('checkCapability', 'trusted'), 'text']) _rpnEnv = { 'dup': lambda s: s.extend([s.pop()]*2), 'swap': lambda s: s.extend([s.pop(), s.pop()]) } def rpn(self, irc, msg, args): """<rpn math expression> Returns the value of an RPN expression. """ stack = [] for arg in args: try: x = complex(arg) if x == abs(x): x = abs(x) stack.append(x) except ValueError: # Not a float. if arg in SAFE_ENV: f = SAFE_ENV[arg] if callable(f): called = False arguments = [] while not called and stack: arguments.append(stack.pop()) try: stack.append(f(*arguments)) called = True except TypeError: pass if not called: irc.error(_('Not enough arguments for %s') % arg) return else: stack.append(f) elif arg in self._rpnEnv: self._rpnEnv[arg](stack) else: arg2 = stack.pop() arg1 = stack.pop() s = '%s%s%s' % (arg1, arg, arg2) try: stack.append(safe_eval(s, allow_ints=False)) except SyntaxError: irc.error(format(_('%q is not a defined function.'), arg)) return if len(stack) == 1: irc.reply(str(self._complexToString(complex(stack[0])))) else: s = ', '.join(map(self._complexToString, list(map(complex, stack)))) irc.reply(_('Stack: [%s]') % s) @internationalizeDocstring def convert(self, irc, msg, args, number, unit1, unit2): """[<number>] <unit> to <other unit> Converts from <unit> to <other unit>. If number isn't given, it defaults to 1. For unit information, see 'units' command. 
""" try: digits = len(str(number).split('.')[1]) except IndexError: digits = 0 try: newNum = convertcore.convert(number, unit1, unit2) if isinstance(newNum, float): zeros = 0 for char in str(newNum).split('.')[1]: if char != '0': break zeros += 1 # Let's add one signifiant digit. Physicists would not like # that, but common people usually do not give extra zeros... # (for example, with '32 C to F', an extra digit would be # expected). newNum = round(newNum, digits + 1 + zeros) newNum = self._floatToString(newNum) irc.reply(str(newNum)) except convertcore.UnitDataError as ude: irc.error(str(ude)) convert = wrap(convert, [optional('float', 1.0),'something','to','text']) @internationalizeDocstring def units(self, irc, msg, args, type): """ [<type>] With no arguments, returns a list of measurement types, which can be passed as arguments. When called with a type as an argument, returns the units of that type. """ irc.reply(convertcore.units(type)) units = wrap(units, [additional('text')]) Class = Math # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
./CrossVul/dataset_final_sorted/CWE-20/py/good_1232_1
crossvul-python_data_bad_3659_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import crypto from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova.rpc import common as rpc_common from nova import quota from nova import utils from nova import volume FLAGS = flags.FLAGS flags.DECLARE('dhcp_domain', 'nova.network.manager') LOG = logging.getLogger(__name__) def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so 
def _state_description(vm_state, shutdown_terminate):
    """Map the vm state to the server status string.

    Returns an EC2-style {'code': ..., 'name': ...} dict.  A SHUTOFF
    instance that is not configured to terminate on shutdown is reported
    as 'stopped' instead of 'shutoff'.
    """
    name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
    if vm_state == vm_states.SHUTOFF and not shutdown_terminate:
        name = inst_state.STOPPED
    return {'code': inst_state.name_to_code(name), 'name': name}
ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? 
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. """ def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.sgh = utils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = 
{'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = 
snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): if not re.match('^[a-zA-Z0-9_\- ]+$', str(key_name)): err = _("Value (%s) for KeyName is invalid." " Content limited to Alphanumeric character, " "spaces, dashes, and underscore.") % key_name raise exception.EC2APIError(err) if len(str(key_name)) > 255: err = _("Value (%s) for Keyname is invalid." 
        # --- tail of create_key_pair(); the "def" line and the opening of
        # this error string sit on the previous chunk line ---
                    " Length exceeds maximum of 255.") % key_name
            raise exception.EC2APIError(err)
        LOG.audit(_("Create key pair %s"), key_name, context=context)
        data = _gen_key(context, context.user_id, key_name)

        return {'keyName': key_name,
                'keyFingerprint': data['fingerprint'],
                'keyMaterial': data['private_key']}
        # TODO(vish): when context is no longer an object, pass it here

    def import_key_pair(self, context, key_name, public_key_material,
                        **kwargs):
        # Register a caller-supplied public key (base64-encoded) as a keypair.
        LOG.audit(_("Import key %s"), key_name, context=context)
        try:
            db.key_pair_get(context, context.user_id, key_name)
            raise exception.KeyPairExists(key_name=key_name)
        except exception.NotFound:
            pass
        # NOTE(review): unlike create_key_pair above, key_name is not
        # validated here (no charset check, no 255-char limit), and
        # b64decode raises binascii.Error on malformed input, which is
        # unhandled here.  Consider applying the same validation as
        # create_key_pair -- confirm intended behaviour first.
        public_key = base64.b64decode(public_key_material)
        fingerprint = crypto.generate_fingerprint(public_key)
        key = {}
        key['user_id'] = context.user_id
        key['name'] = key_name
        key['public_key'] = public_key
        key['fingerprint'] = fingerprint
        db.key_pair_create(context, key)
        return {'keyName': key_name,
                'keyFingerprint': fingerprint}

    def delete_key_pair(self, context, key_name, **kwargs):
        LOG.audit(_("Delete key pair %s"), key_name, context=context)
        try:
            db.key_pair_destroy(context, context.user_id, key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True

    def describe_security_groups(self, context, group_name=None,
                                 group_id=None, **kwargs):
        # List security groups, optionally filtered by name(s) and/or id(s);
        # admins with no filter see all groups, others see their project's.
        self.compute_api.ensure_default_security_group(context)
        if group_name or group_id:
            groups = []
            if group_name:
                for name in group_name:
                    group = db.security_group_get_by_name(context,
                                                          context.project_id,
                                                          name)
                    groups.append(group)
            if group_id:
                for gid in group_id:
                    group = db.security_group_get(context, gid)
                    groups.append(group)
        elif context.is_admin:
            groups = db.security_group_get_all(context)
        else:
            groups = db.security_group_get_by_project(context,
                                                      context.project_id)
        groups = [self._format_security_group(context, g) for g in groups]

        # Stable ordering for the EC2 response: by owner, then group name.
        return {'securityGroupInfo':
                list(sorted(groups,
                            key=lambda k: (k['ownerId'], k['groupName'])))}

    def _format_security_group(self, context,
                               # (signature continues on the next chunk line)
group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: 
mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.EC2APIError(_("Invalid CIDR")) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if source_security_group_name: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. 
        """
        # ^ leading fragment: closes the docstring of
        # _security_group_rule_exists, whose "def" line sits on the previous
        # chunk line.
        for rule in security_group.rules:
            is_duplicate = True
            # A rule is "the same" iff all five identifying fields match;
            # a key absent on either side compares as None.
            keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    is_duplicate = False
                    break
            if is_duplicate:
                return rule['id']
        return False

    def revoke_security_group_ingress(self, context, group_name=None,
                                      group_id=None, **kwargs):
        # Revoke (delete) matching ingress rules from a security group.
        # Accepts a group name or a group id; EC2-style rule kwargs are
        # normalised by _rule_args_to_dict().
        if not group_name and not group_id:
            err = _("Not enough parameters, need group_name or group_id")
            raise exception.EC2APIError(err)
        self.compute_api.ensure_default_security_group(context)
        notfound = exception.SecurityGroupNotFound
        if group_name:
            security_group = db.security_group_get_by_name(context,
                                                           context.project_id,
                                                           group_name)
            if not security_group:
                raise notfound(security_group_id=group_name)
        if group_id:
            # NOTE: if both name and id are given, the id lookup wins.
            security_group = db.security_group_get(context, group_id)
            if not security_group:
                raise notfound(security_group_id=group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        # Either a list of IpPermissions structures, or the bare kwargs as a
        # single rule description.
        prevalues = []
        try:
            prevalues = kwargs['ip_permissions']
        except KeyError:
            prevalues.append(kwargs)
        rule_id = None
        rule_ids = []
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            if not rulesvalues:
                err = _("%s Not enough parameters to build a valid rule")
                raise exception.EC2APIError(err % rulesvalues)

            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                rule_id = self._security_group_rule_exists(security_group,
                                                           values_for_rule)
                if rule_id:
                    db.security_group_rule_destroy(context, rule_id)
                    rule_ids.append(rule_id)
        if rule_id:
            # NOTE(vish): we removed a rule, so refresh
            # NOTE(review): rule_id only reflects the LAST lookup above; if
            # the final candidate did not match but earlier rules were
            # destroyed, this refresh (and the True return) is skipped.
            # `if rule_ids:` looks like the intended condition -- confirm
            # before changing.
            self.compute_api.trigger_security_group_rules_refresh(
                context,
                security_group_id=security_group['id'])
            self.sgh.trigger_security_group_rule_destroy_refresh(
                context, rule_ids)
            return True

        raise exception.EC2APIError(_("No rule for the specified parameters."))

    # TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) allowed = quota.allowed_security_group_rules(context, security_group['id'], 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exception.EC2APIError(msg) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) 
            # --- tail of authorize_security_group_ingress(); the loop that
            # builds postvalues and creates the rules sits on the previous
            # chunk line ---
            self.sgh.trigger_security_group_rule_create_refresh(
                context, rule_ids)
            return True

        raise exception.EC2APIError(_("No rule for the specified parameters."))

    def _get_source_project_id(self, context, source_security_group_owner_id):
        # Resolve an EC2 "owner" string ("user:project" or bare "project")
        # into a nova project id; a falsy owner defaults to the caller's
        # own project.
        if source_security_group_owner_id:
            # Parse user:project for source group.
            source_parts = source_security_group_owner_id.split(':')

            # If no project name specified, assume it's same as user name.
            # Since we're looking up by project name, the user name is not
            # used here. It's only read for EC2 API compatibility.
            if len(source_parts) == 2:
                source_project_id = source_parts[1]
            else:
                source_project_id = source_parts[0]
        else:
            source_project_id = context.project_id

        return source_project_id

    def create_security_group(self, context, group_name, group_description):
        # NOTE(review): indentation in this method reconstructed from a
        # whitespace-collapsed dump -- confirm against upstream nova.
        if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)):
            # Some validation to ensure that values match API spec.
            # - Alphanumeric characters, spaces, dashes, and underscores.
            # TODO(Daviey): LP: #813685 extend beyond group_name checking, and
            #   probably create a param validator that can be used elsewhere.
            err = _("Value (%s) for parameter GroupName is invalid."
                    " Content limited to Alphanumeric characters, "
                    "spaces, dashes, and underscores.") % group_name
            # err not that of master ec2 implementation, as they fail to
            # raise.
            raise exception.InvalidParameterValue(err=err)
        if len(str(group_name)) > 255:
            err = _("Value (%s) for parameter GroupName is invalid."
                    # (string literal continues on the next chunk line)
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exception.EC2APIError(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id 
validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) 
LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = kwargs.get('availability_zone', None) volume = self.volume_api.create(context, size, None, None, snapshot, availability_zone=availability_zone) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete(context, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) try: instance = self.compute_api.detach_volume(context, volume_id=volume_id) 
except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, 
result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id validate_ec2_id(instance_id) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in 
zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: internal_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, internal_id) i['shutdownState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['shutdownState'] = _state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: 
security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = 
i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) zone = ec2utils.get_availability_zone_by_host(services, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] floaters = self.network_api.get_floating_ips_by_project(context) for floating_ip_ref in floaters: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if floating_ip_ref['fixed_ip_id']: fixed_id = floating_ip_ref['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_id'] is not None: ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) try: public_ip = 
self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} except rpc_common.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) self.compute_api.associate_floating_ip(context, instance, address=public_ip) return {'return': "true"} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'return': "true"} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( 
kwargs.get('instance_type', None)), image_href=image_uuid, min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, resv_id) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) previous_states = [] for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) previous_states.append(instance) self.compute_api.delete(context, instance) return self._format_terminate_instances(context, instance_id, previous_states) def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.reboot(context, instance, 'HARD') return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instances in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to stop instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.stop(context, instance) return True def start_instances(self, context, instance_id, **kwargs): """Start each instances in instance_id. 
Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') if FLAGS.auth_strategy == 'deprecated': i['imageOwnerId'] = image['properties'].get('project_id') else: i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] 
= not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties = image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 
'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if not 'user_group' in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care of it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): restart_instance = True self.compute_api.stop(context, instance) # wait instance for really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance with in %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
snapshot = self.volume_api.create_snapshot_force( context, volume, volume['display_name'], volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
# --- dataset artifact: file-boundary markers, not part of the code ---
# ./CrossVul/dataset_final_sorted/CWE-20/py/bad_3659_0
# crossvul-python_data_bad_3767_3
from __future__ import absolute_import, unicode_literals import copy import datetime from email.header import Header import os import re import sys import time import warnings from io import BytesIO from pprint import pformat try: from urllib.parse import quote, parse_qsl, urlencode, urljoin, urlparse except ImportError: # Python 2 from urllib import quote, urlencode from urlparse import parse_qsl, urljoin, urlparse from django.utils.six.moves import http_cookies # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"') # See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256 _tc = http_cookies.SimpleCookie() try: _tc.load(str('foo:bar=1')) _cookie_allows_colon_in_names = True except http_cookies.CookieError: _cookie_allows_colon_in_names = False if _cookie_encodes_correctly and _cookie_allows_colon_in_names: SimpleCookie = http_cookies.SimpleCookie else: Morsel = http_cookies.Morsel class SimpleCookie(http_cookies.SimpleCookie): if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",","\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. 
if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded if not _cookie_allows_colon_in_names: def load(self, rawdata): self.bad_cookies = set() super(SimpleCookie, self).load(force_str(rawdata)) for key in self.bad_cookies: del self[key] # override private __set() method: # (needed for using our Morsel, and for laxness with CookieError def _BaseCookie__set(self, key, real_value, coded_value): key = force_str(key) try: M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) except http_cookies.CookieError: self.bad_cookies.add(key) dict.__setitem__(self, key, http_cookies.Morsel()) from django.conf import settings from django.core import signing from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser from django.http.utils import * from django.utils.datastructures import MultiValueDict, ImmutableList from django.utils.encoding import force_bytes, force_str, force_text, iri_to_uri from django.utils.http import cookie_date from django.utils import six from django.utils import timezone RESERVED_CHARS="!*'();:@&=+$,/?%#[]" absolute_http_url_re = re.compile(r"^https?://", re.I) class Http404(Exception): pass RAISE_ERROR = object() def build_request_repr(request, path_override=None, GET_override=None, POST_override=None, COOKIES_override=None, META_override=None): """ Builds and returns the request's representation string. The request's attributes may be overridden by pre-processed values. """ # Since this is called as part of error handling, we need to be very # robust against potentially malformed input. 
try: get = (pformat(GET_override) if GET_override is not None else pformat(request.GET)) except Exception: get = '<could not parse>' if request._post_parse_error: post = '<could not parse>' else: try: post = (pformat(POST_override) if POST_override is not None else pformat(request.POST)) except Exception: post = '<could not parse>' try: cookies = (pformat(COOKIES_override) if COOKIES_override is not None else pformat(request.COOKIES)) except Exception: cookies = '<could not parse>' try: meta = (pformat(META_override) if META_override is not None else pformat(request.META)) except Exception: meta = '<could not parse>' path = path_override if path_override is not None else request.path return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % (request.__class__.__name__, path, six.text_type(get), six.text_type(post), six.text_type(cookies), six.text_type(meta))) class UnreadablePostError(IOError): pass class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {} self.path = '' self.path_info = '' self.method = None self._post_parse_error = False def __repr__(self): return build_request_repr(self) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. 
host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '') def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempts to return a signed cookie. If the signature fails or the cookie has expired, raises an exception... unless you provide the default argument in which case that value will be returned instead. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. """ if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % ('https' if self.is_secure() else 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def _is_secure(self): return os.environ.get("HTTPS") == "on" def is_secure(self): # First, check the SECURE_PROXY_SSL_HEADER setting. 
if settings.SECURE_PROXY_SSL_HEADER: try: header, value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.') if self.META.get(header, None) == value: return True # Failing that, fall back to _is_secure(), which is a hook for # subclasses to implement. return self._is_secure() def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Sets the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning="You cannot alter upload handlers after the upload has been processed." 
) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise Exception("You cannot access body after reading from request's data stream") try: self._body = self.read() except IOError as e: six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2]) self._stream = BytesIO(self._body) return self._body @property def raw_post_data(self): warnings.warn('HttpRequest.raw_post_data has been deprecated. Use HttpRequest.body instead.', DeprecationWarning) return self.body def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): # Populates self._post and self._files if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart'): if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except: # An error occured while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occured. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise else: self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() ## File-like and iterator interface. ## ## Expects self._stream to be set to an appropriate source of bytes by ## a corresponding request subclass (e.g. WSGIRequest). 
## Also when request data has already been read by request.POST or ## request.body, self._stream points to a BytesIO instance ## containing that data. def read(self, *args, **kwargs): self._read_started = True return self._stream.read(*args, **kwargs) def readline(self, *args, **kwargs): self._read_started = True return self._stream.readline(*args, **kwargs) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) class QueryDict(MultiValueDict): """ A specialized MultiValueDict that takes a query string when initialized. This is immutable unless you create a copy of it. Values retrieved from this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string, mutable=False, encoding=None): super(QueryDict, self).__init__() if not encoding: encoding = settings.DEFAULT_CHARSET self.encoding = encoding if six.PY3: for key, value in parse_qsl(query_string or '', keep_blank_values=True, encoding=encoding): self.appendlist(key, value) else: for key, value in parse_qsl(query_string or '', keep_blank_values=True): self.appendlist(force_text(key, encoding, errors='replace'), force_text(value, encoding, errors='replace')) self._mutable = mutable @property def encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding @encoding.setter def encoding(self, value): self._encoding = value def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super(QueryDict, self).__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() 
super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in six.iterlists(self): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in six.iterlists(self): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = bytes_to_text(key, self.encoding) list_ = [bytes_to_text(elt, self.encoding) for elt in list_] super(QueryDict, self).setlist(key, list_) def setlistdefault(self, key, default_list=None): self._assert_mutable() return super(QueryDict, self).setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super(QueryDict, self).appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super(QueryDict, self).pop(key, *args) def popitem(self): self._assert_mutable() return super(QueryDict, self).popitem() def clear(self): self._assert_mutable() super(QueryDict, self).clear() def setdefault(self, key, default=None): self._assert_mutable() key = bytes_to_text(key, self.encoding) default = bytes_to_text(default, self.encoding) return super(QueryDict, self).setdefault(key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. 
:arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = force_bytes(safe, self.encoding) encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = force_bytes(k, self.encoding) output.extend([encode(k, force_bytes(v, self.encoding)) for v in list_]) return '&'.join(output) def parse_cookie(cookie): if cookie == '': return {} if not isinstance(cookie, http_cookies.BaseCookie): try: c = SimpleCookie() c.load(cookie) except http_cookies.CookieError: # Invalid cookie return {} else: c = cookie cookiedict = {} for key in c.keys(): cookiedict[key] = c.get(key).value return cookiedict class BadHeaderError(ValueError): pass class HttpResponse(object): """A basic HTTP response, with content and dictionary-accessed headers.""" status_code = 200 def __init__(self, content='', content_type=None, status=None, mimetype=None): # _headers is a mapping of the lower-case name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._charset = settings.DEFAULT_CHARSET if mimetype: warnings.warn("Using mimetype keyword argument is deprecated, use" " content_type instead", PendingDeprecationWarning) content_type = mimetype if not content_type: content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE, self._charset) # content is a bytestring. See the content property methods. 
self.content = content self.cookies = SimpleCookie() if status: self.status_code = status self['Content-Type'] = content_type def serialize(self): """Full HTTP message, including headers, as a bytestring.""" headers = [ ('%s: %s' % (key, value)).encode('us-ascii') for key, value in self._headers.values() ] return b'\r\n'.join(headers) + b'\r\n\r\n' + self.content if six.PY3: __bytes__ = serialize else: __str__ = serialize def _convert_to_charset(self, value, charset, mime_encode=False): """Converts headers key/value to ascii/latin1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` value can't be represented in the given charset, MIME-encoding is applied. """ if not isinstance(value, (bytes, six.text_type)): value = str(value) try: if six.PY3: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) else: # Convert bytestring using given charset value = value.decode(charset) else: if isinstance(value, str): # Ensure string is valid in given charset value.decode(charset) else: # Convert unicode string to given charset value = value.encode(charset) except UnicodeError as e: if mime_encode: # Wrapping in str() is a workaround for #12422 under Python 2. 
value = str(Header(value, 'utf-8').encode()) else: e.reason += ', HTTP response headers must be in %s format' % charset raise if str('\n') in value or str('\r') in value: raise BadHeaderError("Header values can't contain newlines (got %r)" % value) return value def __setitem__(self, header, value): header = self._convert_to_charset(header, 'ascii') value = self._convert_to_charset(value, 'latin1', mime_encode=True) self._headers[header.lower()] = (header, value) def __delitem__(self, header): try: del self._headers[header.lower()] except KeyError: pass def __getitem__(self, header): return self._headers[header.lower()][1] def __getstate__(self): # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we # serialise to a string instead state = self.__dict__.copy() state['cookies'] = str(state['cookies']) return state def __setstate__(self, state): self.__dict__.update(state) self.cookies = SimpleCookie(self.cookies) def has_header(self, header): """Case-insensitive check for a header.""" return header.lower() in self._headers __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate=None): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ Sets a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then ``max_age`` will be calculated. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_aware(expires): expires = timezone.make_naive(expires, timezone.utc) delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). 
delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = cookie_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None): self.set_cookie(key, max_age=0, path=path, domain=domain, expires='Thu, 01-Jan-1970 00:00:00 GMT') @property def content(self): if self.has_header('Content-Encoding'): def make_bytes(value): if isinstance(value, int): value = six.text_type(value) if isinstance(value, six.text_type): value = value.encode('ascii') # force conversion to bytes in case chunk is a subclass return bytes(value) return b''.join(make_bytes(e) for e in self._container) return b''.join(force_bytes(e, self._charset) for e in self._container) @content.setter def content(self, value): if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)): self._container = value self._base_content_is_iter = True else: self._container = [value] self._base_content_is_iter = False def __iter__(self): self._iterator = iter(self._container) return self def __next__(self): chunk = next(self._iterator) if isinstance(chunk, int): chunk = six.text_type(chunk) if isinstance(chunk, six.text_type): chunk = chunk.encode(self._charset) # force conversion to bytes in case chunk is a subclass return bytes(chunk) next = __next__ # Python 2 compatibility def 
close(self): if hasattr(self._container, 'close'): self._container.close() # The remaining methods partially implement the file-like object interface. # See http://docs.python.org/lib/bltin-file-objects.html def write(self, content): if self._base_content_is_iter: raise Exception("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if self._base_content_is_iter: raise Exception("This %s instance cannot tell its position" % self.__class__) return sum([len(chunk) for chunk in self]) class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to, *args, **kwargs): parsed = urlparse(redirect_to) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise SuspiciousOperation("Unsafe redirect to URL with protocol '%s'" % parsed.scheme) super(HttpResponseRedirectBase, self).__init__(*args, **kwargs) self['Location'] = iri_to_uri(redirect_to) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super(HttpResponseNotModified, self).__init__(*args, **kwargs) del self['content-type'] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError("You cannot set content to a 304 (Not Modified) response") self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super(HttpResponseNotAllowed, self).__init__(*args, **kwargs) self['Allow'] = ', '.join(permitted_methods) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): 
status_code = 500 # A backwards compatible alias for HttpRequest.get_host. def get_host(request): return request.get_host() # It's neither necessary nor appropriate to use # django.utils.encoding.smart_text for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Returns any non-basestring objects without change. """ if isinstance(s, bytes): return six.text_type(s, encoding, 'replace') else: return s
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3767_3
crossvul-python_data_good_3766_2
import urlparse from django.conf import settings from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, QueryDict from django.template.response import TemplateResponse from django.utils.http import base36_to_int from django.utils.translation import ugettext as _ from django.views.decorators.debug import sensitive_post_parameters from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_protect # Avoid shadowing the login() and logout() views below. from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm from django.contrib.auth.models import User from django.contrib.auth.tokens import default_token_generator from django.contrib.sites.models import get_current_site @sensitive_post_parameters() @csrf_protect @never_cache def login(request, template_name='registration/login.html', redirect_field_name=REDIRECT_FIELD_NAME, authentication_form=AuthenticationForm, current_app=None, extra_context=None): """ Displays the login form and handles the login action. """ redirect_to = request.REQUEST.get(redirect_field_name, '') if request.method == "POST": form = authentication_form(data=request.POST) if form.is_valid(): netloc = urlparse.urlparse(redirect_to)[1] # Use default setting if redirect_to is empty if not redirect_to: redirect_to = settings.LOGIN_REDIRECT_URL # Heavier security check -- don't allow redirection to a different # host. elif netloc and netloc != request.get_host(): redirect_to = settings.LOGIN_REDIRECT_URL # Okay, security checks complete. Log the user in. 
auth_login(request, form.get_user()) if request.session.test_cookie_worked(): request.session.delete_test_cookie() return HttpResponseRedirect(redirect_to) else: form = authentication_form(request) request.session.set_test_cookie() current_site = get_current_site(request) context = { 'form': form, redirect_field_name: redirect_to, 'site': current_site, 'site_name': current_site.name, } if extra_context is not None: context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app) def logout(request, next_page=None, template_name='registration/logged_out.html', redirect_field_name=REDIRECT_FIELD_NAME, current_app=None, extra_context=None): """ Logs out the user and displays 'You are logged out' message. """ auth_logout(request) redirect_to = request.REQUEST.get(redirect_field_name, '') if redirect_to: netloc = urlparse.urlparse(redirect_to)[1] # Security check -- don't allow redirection to a different host. if not (netloc and netloc != request.get_host()): return HttpResponseRedirect(redirect_to) if next_page is None: current_site = get_current_site(request) context = { 'site': current_site, 'site_name': current_site.name, 'title': _('Logged out') } if extra_context is not None: context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app) else: # Redirect to this page until the session has been cleared. return HttpResponseRedirect(next_page or request.path) def logout_then_login(request, login_url=None, current_app=None, extra_context=None): """ Logs out the user if he is logged in. Then redirects to the log-in page. 
""" if not login_url: login_url = settings.LOGIN_URL return logout(request, login_url, current_app=current_app, extra_context=extra_context) def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME): """ Redirects the user to the login page, passing the given 'next' page """ if not login_url: login_url = settings.LOGIN_URL login_url_parts = list(urlparse.urlparse(login_url)) if redirect_field_name: querystring = QueryDict(login_url_parts[4], mutable=True) querystring[redirect_field_name] = next login_url_parts[4] = querystring.urlencode(safe='/') return HttpResponseRedirect(urlparse.urlunparse(login_url_parts)) # 4 views for password reset: # - password_reset sends the mail # - password_reset_done shows a success message for the above # - password_reset_confirm checks the link the user clicked and # prompts for a new password # - password_reset_complete shows a success message for the above @csrf_protect def password_reset(request, is_admin_site=False, template_name='registration/password_reset_form.html', email_template_name='registration/password_reset_email.html', subject_template_name='registration/password_reset_subject.txt', password_reset_form=PasswordResetForm, token_generator=default_token_generator, post_reset_redirect=None, from_email=None, current_app=None, extra_context=None): if post_reset_redirect is None: post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done') if request.method == "POST": form = password_reset_form(request.POST) if form.is_valid(): opts = { 'use_https': request.is_secure(), 'token_generator': token_generator, 'from_email': from_email, 'email_template_name': email_template_name, 'subject_template_name': subject_template_name, 'request': request, } if is_admin_site: opts = dict(opts, domain_override=request.get_host()) form.save(**opts) return HttpResponseRedirect(post_reset_redirect) else: form = password_reset_form() context = { 'form': form, } if extra_context is not None: 
context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app) def password_reset_done(request, template_name='registration/password_reset_done.html', current_app=None, extra_context=None): context = {} if extra_context is not None: context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app) # Doesn't need csrf_protect since no-one can guess the URL @sensitive_post_parameters() @never_cache def password_reset_confirm(request, uidb36=None, token=None, template_name='registration/password_reset_confirm.html', token_generator=default_token_generator, set_password_form=SetPasswordForm, post_reset_redirect=None, current_app=None, extra_context=None): """ View that checks the hash in a password reset link and presents a form for entering a new password. """ assert uidb36 is not None and token is not None # checked by URLconf if post_reset_redirect is None: post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete') try: uid_int = base36_to_int(uidb36) user = User.objects.get(id=uid_int) except (ValueError, User.DoesNotExist): user = None if user is not None and token_generator.check_token(user, token): validlink = True if request.method == 'POST': form = set_password_form(user, request.POST) if form.is_valid(): form.save() return HttpResponseRedirect(post_reset_redirect) else: form = set_password_form(None) else: validlink = False form = None context = { 'form': form, 'validlink': validlink, } if extra_context is not None: context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app) def password_reset_complete(request, template_name='registration/password_reset_complete.html', current_app=None, extra_context=None): context = { 'login_url': settings.LOGIN_URL } if extra_context is not None: context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app) 
@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    current_app=None, extra_context=None):
    """Display and process the password-change form for the logged-in user.

    POSTed passwords are kept out of error reports by
    ``sensitive_post_parameters``. On a valid submission the new password
    is saved and the user is redirected to ``post_change_redirect``
    (defaults to the ``password_change_done`` view); otherwise the bound
    or unbound form is re-rendered with ``template_name``.
    """
    if post_change_redirect is None:
        # Resolve the default success URL lazily so URLconf overrides work.
        post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
    if request.method == "POST":
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(post_change_redirect)
    else:
        form = password_change_form(user=request.user)
    context = {
        'form': form,
    }
    if extra_context is not None:
        # Caller-supplied context wins over the defaults above.
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)

@login_required
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         current_app=None, extra_context=None):
    """Render the confirmation page shown after a successful password change."""
    context = {}
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
./CrossVul/dataset_final_sorted/CWE-20/py/good_3766_2
crossvul-python_data_bad_8_0
from importlib import import_module
from os import path, listdir
from string import lower
from types import ModuleType

from debug import logger
import paths


class MsgBase(object):
    """Common base for message-type payload classes.

    Subclasses live in sibling modules of this package (one module per
    message type) and are located by name in ``constructObject``.
    """

    def encode(self):
        # The empty-string key carries the wire-level type tag; it must be
        # the lowercased class name so constructObject can find the class.
        self.data = {"": lower(type(self).__name__)}


def constructObject(data):
    """Build and decode the message object described by ``data``.

    ``data[""]`` names the message type. The name is resolved by attribute
    lookup on an already-imported submodule of this package instead of
    being passed to ``eval()``, so a hostile payload cannot inject and
    execute arbitrary Python expressions.

    Returns the decoded object, or ``None`` on any failure.
    """
    try:
        msgtype = data[""]
    except (KeyError, TypeError):
        logger.error("Malformed message data, no type tag")
        return None
    # Only names bound to submodules imported below are acceptable;
    # anything else (dotted paths, dunder tricks, builtins) is rejected.
    module = globals().get(msgtype)
    if not isinstance(module, ModuleType):
        logger.error("Don't know how to handle message type: \"%s\"", msgtype)
        return None
    classBase = getattr(module, msgtype.title(), None)
    if classBase is None:
        logger.error("Don't know how to handle message type: \"%s\"", msgtype)
        return None
    try:
        returnObj = classBase()
        returnObj.decode(data)
    except KeyError as e:
        logger.error("Missing mandatory key %s", e)
        return None
    except:
        logger.error("classBase fail", exc_info=True)
        return None
    else:
        return returnObj


if paths.frozen is not None:
    # Frozen (e.g. PyInstaller) builds cannot scan the package directory,
    # so the known message-type modules are imported explicitly.
    import messagetypes.message
    import messagetypes.vote
else:
    # Import every sibling .py module so its classes become reachable via
    # the globals() lookup in constructObject.
    for mod in listdir(path.dirname(__file__)):
        if mod == "__init__.py":
            continue
        splitted = path.splitext(mod)
        if splitted[1] != ".py":
            continue
        try:
            import_module("." + splitted[0], "messagetypes")
        except ImportError:
            logger.error("Error importing %s", mod, exc_info=True)
        else:
            logger.debug("Imported message type module %s", mod)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_8_0
crossvul-python_data_bad_3767_0
"""URLconf and stub views used by the django.contrib.auth test suite.

Extends the stock ``django.contrib.auth.urls`` patterns with extra routes
that exercise logout redirects, password reset, ``login_required``
wrapping and the auth context processor.
"""
from django.conf.urls import patterns, url
from django.contrib.auth import context_processors
from django.contrib.auth.urls import urlpatterns
from django.contrib.auth.views import password_reset
from django.contrib.auth.decorators import login_required
from django.contrib.messages.api import info
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Template, RequestContext
from django.views.decorators.cache import never_cache


@never_cache
def remote_user_auth_view(request):
    "Dummy view for remote user tests"
    t = Template("Username is {{ user }}.")
    c = RequestContext(request, {})
    return HttpResponse(t.render(c))


def auth_processor_no_attr_access(request):
    # Render a template that never touches user attributes, so the session
    # should stay untouched.
    r1 = render_to_response('context_processors/auth_attrs_no_access.html',
        RequestContext(request, {}, processors=[context_processors.auth]))
    # *After* rendering, we check whether the session was accessed
    return render_to_response('context_processors/auth_attrs_test_access.html',
        {'session_accessed':request.session.accessed})


def auth_processor_attr_access(request):
    # Same as above, but the template *does* access user attributes, which
    # should mark the session as accessed.
    r1 = render_to_response('context_processors/auth_attrs_access.html',
        RequestContext(request, {}, processors=[context_processors.auth]))
    return render_to_response('context_processors/auth_attrs_test_access.html',
        {'session_accessed':request.session.accessed})


def auth_processor_user(request):
    # Exposes {{ user }} provided by the auth context processor.
    return render_to_response('context_processors/auth_attrs_user.html',
        RequestContext(request, {}, processors=[context_processors.auth]))


def auth_processor_perms(request):
    # Exposes {{ perms }} provided by the auth context processor.
    return render_to_response('context_processors/auth_attrs_perms.html',
        RequestContext(request, {}, processors=[context_processors.auth]))


def auth_processor_perm_in_perms(request):
    # Exercises `"app.perm" in perms` template lookups.
    return render_to_response('context_processors/auth_attrs_perm_in_perms.html',
        RequestContext(request, {}, processors=[context_processors.auth]))


def auth_processor_messages(request):
    # Queue one message, then render a template that displays messages.
    info(request, "Message 1")
    return render_to_response('context_processors/auth_attrs_messages.html',
        RequestContext(request, {}, processors=[context_processors.auth]))


def userpage(request):
    # Stub target for reverse('userpage') in the tests; presumably never
    # invoked as a real view (it returns None) — TODO confirm.
    pass


# special urls for auth test cases
urlpatterns = urlpatterns + patterns('',
    # logout with a custom redirect-field name / fixed next page
    (r'^logout/custom_query/$', 'django.contrib.auth.views.logout',
        dict(redirect_field_name='follow')),
    (r'^logout/next_page/$', 'django.contrib.auth.views.logout',
        dict(next_page='/somewhere/')),
    (r'^remote_user/$', remote_user_auth_view),
    # password reset with an explicit from_email override
    (r'^password_reset_from_email/$', 'django.contrib.auth.views.password_reset',
        dict(from_email='staffmember@example.com')),
    # login_required-wrapped views, with default and custom login_url
    (r'^login_required/$', login_required(password_reset)),
    (r'^login_required_login_url/$', login_required(password_reset,
        login_url='/somewhere/')),
    (r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access),
    (r'^auth_processor_attr_access/$', auth_processor_attr_access),
    (r'^auth_processor_user/$', auth_processor_user),
    (r'^auth_processor_perms/$', auth_processor_perms),
    (r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms),
    (r'^auth_processor_messages/$', auth_processor_messages),
    url(r'^userpage/(.+)/$', userpage, name="userpage"),
)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3767_0
crossvul-python_data_bad_2078_2
#!/usr/bin/env python from __future__ import division, absolute_import, print_function __all__ = ['run_main', 'compile', 'f2py_testing'] import os import sys import subprocess from . import f2py2e from . import f2py_testing from . import diagnose from .info import __doc__ run_main = f2py2e.run_main main = f2py2e.main def compile(source, modulename = 'untitled', extra_args = '', verbose = 1, source_fn = None ): ''' Build extension module from processing source with f2py. Read the source of this function for more information. ''' from numpy.distutils.exec_command import exec_command import tempfile if source_fn is None: fname = os.path.join(tempfile.mktemp()+'.f') else: fname = source_fn f = open(fname, 'w') f.write(source) f.close() args = ' -c -m %s %s %s'%(modulename, fname, extra_args) c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable, args) s, o = exec_command(c) if source_fn is None: try: os.remove(fname) except OSError: pass return s from numpy.testing import Tester test = Tester().test bench = Tester().bench
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2078_2
crossvul-python_data_good_872_1
# -*- test-case-name: twisted.web.test.test_webclient,twisted.web.test.test_agent -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ HTTP client. """ from __future__ import division, absolute_import import os import collections import warnings try: from urlparse import urlunparse, urljoin, urldefrag except ImportError: from urllib.parse import urljoin, urldefrag from urllib.parse import urlunparse as _urlunparse def urlunparse(parts): result = _urlunparse(tuple([p.decode("charmap") for p in parts])) return result.encode("charmap") import zlib from functools import wraps from zope.interface import implementer from twisted.python.compat import _PY3, networkString from twisted.python.compat import nativeString, intToBytes, unicode, itervalues from twisted.python.deprecate import deprecatedModuleAttribute, deprecated from twisted.python.failure import Failure from incremental import Version from twisted.web.iweb import IPolicyForHTTPS, IAgentEndpointFactory from twisted.python.deprecate import getDeprecationWarningString from twisted.web import http from twisted.internet import defer, protocol, task, reactor from twisted.internet.abstract import isIPv6Address from twisted.internet.interfaces import IProtocol, IOpenSSLContextFactory from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.python.util import InsensitiveDict from twisted.python.components import proxyForInterface from twisted.web import error from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse from twisted.web.http_headers import Headers from twisted.logger import Logger from twisted.web._newclient import _ensureValidURI, _ensureValidMethod class PartialDownloadError(error.Error): """ Page was only partially downloaded, we got disconnected in middle. @ivar response: All of the response body which was downloaded. """ class HTTPPageGetter(http.HTTPClient): """ Gets a resource via HTTP, then quits. 
Typically used with L{HTTPClientFactory}. Note that this class does not, by itself, do anything with the response. If you want to download a resource into a file, use L{HTTPPageDownloader} instead. @ivar _completelyDone: A boolean indicating whether any further requests are necessary after this one completes in order to provide a result to C{self.factory.deferred}. If it is C{False}, then a redirect is going to be followed. Otherwise, this protocol's connection is the last one before firing the result Deferred. This is used to make sure the result Deferred is only fired after the connection is cleaned up. """ quietLoss = 0 followRedirect = True failed = 0 _completelyDone = True _specialHeaders = set( (b'host', b'user-agent', b'cookie', b'content-length'), ) def connectionMade(self): method = _ensureValidMethod(getattr(self.factory, 'method', b'GET')) self.sendCommand(method, _ensureValidURI(self.factory.path)) if self.factory.scheme == b'http' and self.factory.port != 80: host = self.factory.host + b':' + intToBytes(self.factory.port) elif self.factory.scheme == b'https' and self.factory.port != 443: host = self.factory.host + b':' + intToBytes(self.factory.port) else: host = self.factory.host self.sendHeader(b'Host', self.factory.headers.get(b"host", host)) self.sendHeader(b'User-Agent', self.factory.agent) data = getattr(self.factory, 'postdata', None) if data is not None: self.sendHeader(b"Content-Length", intToBytes(len(data))) cookieData = [] for (key, value) in self.factory.headers.items(): if key.lower() not in self._specialHeaders: # we calculated it on our own self.sendHeader(key, value) if key.lower() == b'cookie': cookieData.append(value) for cookie, cookval in self.factory.cookies.items(): cookieData.append(cookie + b'=' + cookval) if cookieData: self.sendHeader(b'Cookie', b'; '.join(cookieData)) self.endHeaders() self.headers = {} if data is not None: self.transport.write(data) def handleHeader(self, key, value): """ Called every time a header is 
received. Stores the header information as key-value pairs in the C{headers} attribute. @type key: C{str} @param key: An HTTP header field name. @type value: C{str} @param value: An HTTP header field value. """ key = key.lower() l = self.headers.setdefault(key, []) l.append(value) def handleStatus(self, version, status, message): """ Handle the HTTP status line. @param version: The HTTP version. @type version: L{bytes} @param status: The HTTP status code, an integer represented as a bytestring. @type status: L{bytes} @param message: The HTTP status message. @type message: L{bytes} """ self.version, self.status, self.message = version, status, message self.factory.gotStatus(version, status, message) def handleEndHeaders(self): self.factory.gotHeaders(self.headers) m = getattr(self, 'handleStatus_' + nativeString(self.status), self.handleStatusDefault) m() def handleStatus_200(self): pass handleStatus_201 = lambda self: self.handleStatus_200() handleStatus_202 = lambda self: self.handleStatus_200() def handleStatusDefault(self): self.failed = 1 def handleStatus_301(self): l = self.headers.get(b'location') if not l: self.handleStatusDefault() return url = l[0] if self.followRedirect: self.factory._redirectCount += 1 if self.factory._redirectCount >= self.factory.redirectLimit: err = error.InfiniteRedirection( self.status, b'Infinite redirection detected', location=url) self.factory.noPage(Failure(err)) self.quietLoss = True self.transport.loseConnection() return self._completelyDone = False self.factory.setURL(url) if self.factory.scheme == b'https': from twisted.internet import ssl contextFactory = ssl.ClientContextFactory() reactor.connectSSL(nativeString(self.factory.host), self.factory.port, self.factory, contextFactory) else: reactor.connectTCP(nativeString(self.factory.host), self.factory.port, self.factory) else: self.handleStatusDefault() self.factory.noPage( Failure( error.PageRedirect( self.status, self.message, location = url))) self.quietLoss = True 
self.transport.loseConnection() def handleStatus_302(self): if self.afterFoundGet: self.handleStatus_303() else: self.handleStatus_301() def handleStatus_303(self): self.factory.method = b'GET' self.handleStatus_301() def connectionLost(self, reason): """ When the connection used to issue the HTTP request is closed, notify the factory if we have not already, so it can produce a result. """ if not self.quietLoss: http.HTTPClient.connectionLost(self, reason) self.factory.noPage(reason) if self._completelyDone: # Only if we think we're completely done do we tell the factory that # we're "disconnected". This way when we're following redirects, # only the last protocol used will fire the _disconnectedDeferred. self.factory._disconnectedDeferred.callback(None) def handleResponse(self, response): if self.quietLoss: return if self.failed: self.factory.noPage( Failure( error.Error( self.status, self.message, response))) if self.factory.method == b'HEAD': # Callback with empty string, since there is never a response # body for HEAD requests. self.factory.page(b'') elif self.length != None and self.length != 0: self.factory.noPage(Failure( PartialDownloadError(self.status, self.message, response))) else: self.factory.page(response) # server might be stupid and not close connection. admittedly # the fact we do only one request per connection is also # stupid... self.transport.loseConnection() def timeout(self): self.quietLoss = True self.transport.abortConnection() self.factory.noPage(defer.TimeoutError("Getting %s took longer than %s seconds." 
% (self.factory.url, self.factory.timeout))) class HTTPPageDownloader(HTTPPageGetter): transmittingPage = 0 def handleStatus_200(self, partialContent=0): HTTPPageGetter.handleStatus_200(self) self.transmittingPage = 1 self.factory.pageStart(partialContent) def handleStatus_206(self): self.handleStatus_200(partialContent=1) def handleResponsePart(self, data): if self.transmittingPage: self.factory.pagePart(data) def handleResponseEnd(self): if self.length: self.transmittingPage = 0 self.factory.noPage( Failure( PartialDownloadError(self.status))) if self.transmittingPage: self.factory.pageEnd() self.transmittingPage = 0 if self.failed: self.factory.noPage( Failure( error.Error( self.status, self.message, None))) self.transport.loseConnection() class HTTPClientFactory(protocol.ClientFactory): """Download a given URL. @type deferred: Deferred @ivar deferred: A Deferred that will fire when the content has been retrieved. Once this is fired, the ivars `status', `version', and `message' will be set. @type status: bytes @ivar status: The status of the response. @type version: bytes @ivar version: The version of the response. @type message: bytes @ivar message: The text message returned with the status. @type response_headers: dict @ivar response_headers: The headers that were specified in the response from the server. @type method: bytes @ivar method: The HTTP method to use in the request. This should be one of OPTIONS, GET, HEAD, POST, PUT, DELETE, TRACE, or CONNECT (case matters). Other values may be specified if the server being contacted supports them. @type redirectLimit: int @ivar redirectLimit: The maximum number of HTTP redirects that can occur before it is assumed that the redirection is endless. 
@type afterFoundGet: C{bool} @ivar afterFoundGet: Deviate from the HTTP 1.1 RFC by handling redirects the same way as most web browsers; if the request method is POST and a 302 status is encountered, the redirect is followed with a GET method @type _redirectCount: int @ivar _redirectCount: The current number of HTTP redirects encountered. @ivar _disconnectedDeferred: A L{Deferred} which only fires after the last connection associated with the request (redirects may cause multiple connections to be required) has closed. The result Deferred will only fire after this Deferred, so that callers can be assured that there are no more event sources in the reactor once they get the result. """ protocol = HTTPPageGetter url = None scheme = None host = b'' port = None path = None def __init__(self, url, method=b'GET', postdata=None, headers=None, agent=b"Twisted PageGetter", timeout=0, cookies=None, followRedirect=True, redirectLimit=20, afterFoundGet=False): self.followRedirect = followRedirect self.redirectLimit = redirectLimit self._redirectCount = 0 self.timeout = timeout self.agent = agent self.afterFoundGet = afterFoundGet if cookies is None: cookies = {} self.cookies = cookies if headers is not None: self.headers = InsensitiveDict(headers) else: self.headers = InsensitiveDict() if postdata is not None: self.headers.setdefault(b'Content-Length', intToBytes(len(postdata))) # just in case a broken http/1.1 decides to keep connection alive self.headers.setdefault(b"connection", b"close") self.postdata = postdata self.method = _ensureValidMethod(method) self.setURL(url) self.waiting = 1 self._disconnectedDeferred = defer.Deferred() self.deferred = defer.Deferred() # Make sure the first callback on the result Deferred pauses the # callback chain until the request connection is closed. 
self.deferred.addBoth(self._waitForDisconnect) self.response_headers = None def _waitForDisconnect(self, passthrough): """ Chain onto the _disconnectedDeferred, preserving C{passthrough}, so that the result is only available after the associated connection has been closed. """ self._disconnectedDeferred.addCallback(lambda ignored: passthrough) return self._disconnectedDeferred def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.url) def setURL(self, url): _ensureValidURI(url.strip()) self.url = url uri = URI.fromBytes(url) if uri.scheme and uri.host: self.scheme = uri.scheme self.host = uri.host self.port = uri.port self.path = uri.originForm def buildProtocol(self, addr): p = protocol.ClientFactory.buildProtocol(self, addr) p.followRedirect = self.followRedirect p.afterFoundGet = self.afterFoundGet if self.timeout: timeoutCall = reactor.callLater(self.timeout, p.timeout) self.deferred.addBoth(self._cancelTimeout, timeoutCall) return p def _cancelTimeout(self, result, timeoutCall): if timeoutCall.active(): timeoutCall.cancel() return result def gotHeaders(self, headers): """ Parse the response HTTP headers. @param headers: The response HTTP headers. @type headers: L{dict} """ self.response_headers = headers if b'set-cookie' in headers: for cookie in headers[b'set-cookie']: if b'=' in cookie: cookparts = cookie.split(b';') cook = cookparts[0] cook.lstrip() k, v = cook.split(b'=', 1) self.cookies[k.lstrip()] = v.lstrip() def gotStatus(self, version, status, message): """ Set the status of the request on us. @param version: The HTTP version. @type version: L{bytes} @param status: The HTTP status code, an integer represented as a bytestring. @type status: L{bytes} @param message: The HTTP status message. 
@type message: L{bytes} """ self.version, self.status, self.message = version, status, message def page(self, page): if self.waiting: self.waiting = 0 self.deferred.callback(page) def noPage(self, reason): if self.waiting: self.waiting = 0 self.deferred.errback(reason) def clientConnectionFailed(self, _, reason): """ When a connection attempt fails, the request cannot be issued. If no result has yet been provided to the result Deferred, provide the connection failure reason as an error result. """ if self.waiting: self.waiting = 0 # If the connection attempt failed, there is nothing more to # disconnect, so just fire that Deferred now. self._disconnectedDeferred.callback(None) self.deferred.errback(reason) class HTTPDownloader(HTTPClientFactory): """ Download to a file. """ protocol = HTTPPageDownloader value = None _log = Logger() def __init__(self, url, fileOrName, method=b'GET', postdata=None, headers=None, agent=b"Twisted client", supportPartial=False, timeout=0, cookies=None, followRedirect=True, redirectLimit=20, afterFoundGet=False): self.requestedPartial = 0 if isinstance(fileOrName, (str, unicode)): self.fileName = fileOrName self.file = None if supportPartial and os.path.exists(self.fileName): fileLength = os.path.getsize(self.fileName) if fileLength: self.requestedPartial = fileLength if headers == None: headers = {} headers[b"range"] = b"bytes=" + intToBytes(fileLength) + b"-" else: self.file = fileOrName HTTPClientFactory.__init__( self, url, method=method, postdata=postdata, headers=headers, agent=agent, timeout=timeout, cookies=cookies, followRedirect=followRedirect, redirectLimit=redirectLimit, afterFoundGet=afterFoundGet) def gotHeaders(self, headers): HTTPClientFactory.gotHeaders(self, headers) if self.requestedPartial: contentRange = headers.get(b"content-range", None) if not contentRange: # server doesn't support partial requests, oh well self.requestedPartial = 0 return start, end, realLength = http.parseContentRange(contentRange[0]) if start 
!= self.requestedPartial: # server is acting weirdly self.requestedPartial = 0 def openFile(self, partialContent): if partialContent: file = open(self.fileName, 'rb+') file.seek(0, 2) else: file = open(self.fileName, 'wb') return file def pageStart(self, partialContent): """Called on page download start. @param partialContent: tells us if the download is partial download we requested. """ if partialContent and not self.requestedPartial: raise ValueError("we shouldn't get partial content response if we didn't want it!") if self.waiting: try: if not self.file: self.file = self.openFile(partialContent) except IOError: #raise self.deferred.errback(Failure()) def pagePart(self, data): if not self.file: return try: self.file.write(data) except IOError: #raise self.file = None self.deferred.errback(Failure()) def noPage(self, reason): """ Close the storage file and errback the waiting L{Deferred} with the given reason. """ if self.waiting: self.waiting = 0 if self.file: try: self.file.close() except: self._log.failure("Error closing HTTPDownloader file") self.deferred.errback(reason) def pageEnd(self): self.waiting = 0 if not self.file: return try: self.file.close() except IOError: self.deferred.errback(Failure()) return self.deferred.callback(self.value) class URI(object): """ A URI object. @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-21} """ def __init__(self, scheme, netloc, host, port, path, params, query, fragment): """ @type scheme: L{bytes} @param scheme: URI scheme specifier. @type netloc: L{bytes} @param netloc: Network location component. @type host: L{bytes} @param host: Host name. For IPv6 address literals the brackets are stripped. @type port: L{int} @param port: Port number. @type path: L{bytes} @param path: Hierarchical path. @type params: L{bytes} @param params: Parameters for last path segment. @type query: L{bytes} @param query: Query string. @type fragment: L{bytes} @param fragment: Fragment identifier. 
""" self.scheme = scheme self.netloc = netloc self.host = host.strip(b'[]') self.port = port self.path = path self.params = params self.query = query self.fragment = fragment @classmethod def fromBytes(cls, uri, defaultPort=None): """ Parse the given URI into a L{URI}. @type uri: C{bytes} @param uri: URI to parse. @type defaultPort: C{int} or L{None} @param defaultPort: An alternate value to use as the port if the URI does not include one. @rtype: L{URI} @return: Parsed URI instance. """ uri = uri.strip() scheme, netloc, path, params, query, fragment = http.urlparse(uri) if defaultPort is None: if scheme == b'https': defaultPort = 443 else: defaultPort = 80 if b':' in netloc: host, port = netloc.rsplit(b':', 1) try: port = int(port) except ValueError: host, port = netloc, defaultPort else: host, port = netloc, defaultPort return cls(scheme, netloc, host, port, path, params, query, fragment) def toBytes(self): """ Assemble the individual parts of the I{URI} into a fully formed I{URI}. @rtype: C{bytes} @return: A fully formed I{URI}. """ return urlunparse( (self.scheme, self.netloc, self.path, self.params, self.query, self.fragment)) @property def originForm(self): """ The absolute I{URI} path including I{URI} parameters, query string and fragment identifier. @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-21#section-5.3} @return: The absolute path in original form. @rtype: L{bytes} """ # The HTTP bis draft says the origin form should not include the # fragment. path = urlunparse( (b'', b'', self.path, self.params, self.query, b'')) if path == b'': path = b'/' return path def _urljoin(base, url): """ Construct a full ("absolute") URL by combining a "base URL" with another URL. Informally, this uses components of the base URL, in particular the addressing scheme, the network location and (part of) the path, to provide missing components in the relative URL. Additionally, the fragment identifier is preserved according to the HTTP 1.1 bis draft. 
    @type base: C{bytes}
    @param base: Base URL.

    @type url: C{bytes}
    @param url: URL to combine with C{base}.

    @return: An absolute URL resulting from the combination of C{base} and
        C{url}.

    @see: L{urlparse.urljoin}

    @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-22#section-7.1.2}
    """
    # Strip any fragment from the base before joining, then re-attach the
    # most specific fragment available: the relative URL's own fragment wins,
    # otherwise the base's fragment is preserved (per the HTTP-bis draft).
    base, baseFrag = urldefrag(base)
    url, urlFrag = urldefrag(urljoin(base, url))
    return urljoin(url, b'#' + (urlFrag or baseFrag))


def _makeGetterFactory(url, factoryFactory, contextFactory=None,
                       *args, **kwargs):
    """
    Create and connect an HTTP page getting factory.

    Any additional positional or keyword arguments are used when calling
    C{factoryFactory}.

    @param factoryFactory: Factory factory that is called with C{url}, C{args}
        and C{kwargs} to produce the getter

    @param contextFactory: Context factory to use when creating a secure
        connection, defaulting to L{None}

    @return: The factory created by C{factoryFactory}
    """
    # The URL is validated before any part of it is placed on the wire;
    # _ensureValidURI raises on octets that are illegal in a URI (which
    # blocks, e.g., CR/LF smuggled into the request line). The same guard
    # is applied again by the factory's setURL.
    uri = URI.fromBytes(_ensureValidURI(url.strip()))
    factory = factoryFactory(url, *args, **kwargs)
    if uri.scheme == b'https':
        # Imported lazily so plain-HTTP use does not require pyOpenSSL.
        from twisted.internet import ssl
        if contextFactory is None:
            contextFactory = ssl.ClientContextFactory()
        reactor.connectSSL(
            nativeString(uri.host), uri.port, factory, contextFactory)
    else:
        reactor.connectTCP(nativeString(uri.host), uri.port, factory)
    return factory


# Replacement suggested in the deprecation warnings emitted for getPage,
# downloadPage, and their supporting protocol/factory classes.
_GETPAGE_REPLACEMENT_TEXT = "https://pypi.org/project/treq/ or twisted.web.client.Agent"


def _deprecateGetPageClasses():
    """
    Mark the protocols and factories associated with L{getPage} and
    L{downloadPage} as deprecated.
    """
    for klass in [
            HTTPPageGetter, HTTPPageDownloader,
            HTTPClientFactory, HTTPDownloader
    ]:
        # getDeprecationWarningString returns "<name> was deprecated ...;
        # <reason>" -- keep only the reason half for the module attribute.
        deprecatedModuleAttribute(
            Version("Twisted", 16, 7, 0),
            getDeprecationWarningString(
                klass,
                Version("Twisted", 16, 7, 0),
                replacement=_GETPAGE_REPLACEMENT_TEXT)
            .split("; ")[1],
            klass.__module__,
            klass.__name__)


_deprecateGetPageClasses()


@deprecated(Version("Twisted", 16, 7, 0), _GETPAGE_REPLACEMENT_TEXT)
def getPage(url, contextFactory=None, *args, **kwargs):
    """
    Download a web page as a string.

    Download a page. Return a deferred, which will callback with a
    page (as a string) or errback with a description of the error.

    See L{HTTPClientFactory} to see what extra arguments can be passed.
    """
    return _makeGetterFactory(
        url,
        HTTPClientFactory,
        contextFactory=contextFactory,
        *args, **kwargs).deferred


@deprecated(Version("Twisted", 16, 7, 0), _GETPAGE_REPLACEMENT_TEXT)
def downloadPage(url, file, contextFactory=None, *args, **kwargs):
    """
    Download a web page to a file.

    @param file: path to file on filesystem, or file-like object.

    See HTTPDownloader to see what extra args can be passed.
    """
    factoryFactory = lambda url, *a, **kw: HTTPDownloader(url, file, *a, **kw)
    return _makeGetterFactory(
        url,
        factoryFactory,
        contextFactory=contextFactory,
        *args, **kwargs).deferred


# The code which follows is based on the new HTTP client implementation.  It
# should be significantly better than anything above, though it is not yet
# feature equivalent.
from twisted.web.error import SchemeNotSupported from twisted.web._newclient import ( HTTP11ClientProtocol, PotentialDataLoss, Request, RequestGenerationFailed, RequestNotSent, RequestTransmissionFailed, Response, ResponseDone, ResponseFailed, ResponseNeverReceived, _WrapperException, ) try: from OpenSSL import SSL except ImportError: SSL = None else: from twisted.internet.ssl import (CertificateOptions, platformTrust, optionsForClientTLS) def _requireSSL(decoratee): """ The decorated method requires pyOpenSSL to be present, or it raises L{NotImplementedError}. @param decoratee: A function which requires pyOpenSSL. @type decoratee: L{callable} @return: A function which raises L{NotImplementedError} if pyOpenSSL is not installed; otherwise, if it is installed, simply return C{decoratee}. @rtype: L{callable} """ if SSL is None: @wraps(decoratee) def raiseNotImplemented(*a, **kw): """ pyOpenSSL is not available. @param a: The positional arguments for C{decoratee}. @param kw: The keyword arguments for C{decoratee}. @raise NotImplementedError: Always. """ raise NotImplementedError("SSL support unavailable") return raiseNotImplemented return decoratee class WebClientContextFactory(object): """ This class is deprecated. Please simply use L{Agent} as-is, or if you want to customize something, use L{BrowserLikePolicyForHTTPS}. A L{WebClientContextFactory} is an HTTPS policy which totally ignores the hostname and port. It performs basic certificate verification, however the lack of validation of service identity (e.g. hostname validation) means it is still vulnerable to man-in-the-middle attacks. Don't use it any more. """ def _getCertificateOptions(self, hostname, port): """ Return a L{CertificateOptions}. @param hostname: ignored @param port: ignored @return: A new CertificateOptions instance. 
@rtype: L{CertificateOptions} """ return CertificateOptions( method=SSL.SSLv23_METHOD, trustRoot=platformTrust() ) @_requireSSL def getContext(self, hostname, port): """ Return an L{OpenSSL.SSL.Context}. @param hostname: ignored @param port: ignored @return: A new SSL context. @rtype: L{OpenSSL.SSL.Context} """ return self._getCertificateOptions(hostname, port).getContext() @implementer(IPolicyForHTTPS) class BrowserLikePolicyForHTTPS(object): """ SSL connection creator for web clients. """ def __init__(self, trustRoot=None): self._trustRoot = trustRoot @_requireSSL def creatorForNetloc(self, hostname, port): """ Create a L{client connection creator <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} for a given network location. @param tls: The TLS protocol to create a connection for. @type tls: L{twisted.protocols.tls.TLSMemoryBIOProtocol} @param hostname: The hostname part of the URI. @type hostname: L{bytes} @param port: The port part of the URI. @type port: L{int} @return: a connection creator with appropriate verification restrictions set @rtype: L{client connection creator <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} """ return optionsForClientTLS(hostname.decode("ascii"), trustRoot=self._trustRoot) deprecatedModuleAttribute(Version("Twisted", 14, 0, 0), getDeprecationWarningString( WebClientContextFactory, Version("Twisted", 14, 0, 0), replacement=BrowserLikePolicyForHTTPS) .split("; ")[1], WebClientContextFactory.__module__, WebClientContextFactory.__name__) @implementer(IPolicyForHTTPS) class HostnameCachingHTTPSPolicy(object): """ IPolicyForHTTPS that wraps a L{IPolicyForHTTPS} and caches the created L{IOpenSSLClientConnectionCreator}. This policy will cache up to C{cacheSize} L{client connection creators <twisted.internet.interfaces. IOpenSSLClientConnectionCreator>} for reuse in subsequent requests to the same hostname. @ivar _policyForHTTPS: See C{policyforHTTPS} parameter of L{__init__}. 
@ivar _cache: A cache associating hostnames to their L{client connection creators <twisted.internet.interfaces. IOpenSSLClientConnectionCreator>}. @type _cache: L{collections.OrderedDict} @ivar _cacheSize: See C{cacheSize} parameter of L{__init__}. @since: Twisted 19.2.0 """ def __init__(self, policyforHTTPS, cacheSize=20): """ @param policyforHTTPS: The IPolicyForHTTPS to wrap. @type policyforHTTPS: L{IPolicyForHTTPS} @param cacheSize: The maximum size of the hostname cache. @type cacheSize: L{int} """ self._policyForHTTPS = policyforHTTPS self._cache = collections.OrderedDict() self._cacheSize = cacheSize def creatorForNetloc(self, hostname, port): """ Create a L{client connection creator <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} for a given network location and cache it for future use. @param hostname: The hostname part of the URI. @type hostname: L{bytes} @param port: The port part of the URI. @type port: L{int} @return: a connection creator with appropriate verification restrictions set @rtype: L{client connection creator <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} """ host = hostname.decode("ascii") try: creator = self._cache.pop(host) except KeyError: creator = self._policyForHTTPS.creatorForNetloc(hostname, port) self._cache[host] = creator if len(self._cache) > self._cacheSize: self._cache.popitem(last=False) return creator @implementer(IOpenSSLContextFactory) class _ContextFactoryWithContext(object): """ A L{_ContextFactoryWithContext} is like a L{twisted.internet.ssl.ContextFactory} with a pre-created context. @ivar _context: A Context. @type _context: L{OpenSSL.SSL.Context} """ def __init__(self, context): """ Initialize a L{_ContextFactoryWithContext} with a context. @param context: An SSL context. @type context: L{OpenSSL.SSL.Context} """ self._context = context def getContext(self): """ Return the context created by L{_DeprecatedToCurrentPolicyForHTTPS._webContextFactory}. @return: A context. 
@rtype context: L{OpenSSL.SSL.Context} """ return self._context @implementer(IPolicyForHTTPS) class _DeprecatedToCurrentPolicyForHTTPS(object): """ Adapt a web context factory to a normal context factory. @ivar _webContextFactory: An object providing a getContext method with C{hostname} and C{port} arguments. @type _webContextFactory: L{WebClientContextFactory} (or object with a similar C{getContext} method). """ def __init__(self, webContextFactory): """ Wrap a web context factory in an L{IPolicyForHTTPS}. @param webContextFactory: An object providing a getContext method with C{hostname} and C{port} arguments. @type webContextFactory: L{WebClientContextFactory} (or object with a similar C{getContext} method). """ self._webContextFactory = webContextFactory def creatorForNetloc(self, hostname, port): """ Called the wrapped web context factory's C{getContext} method with a hostname and port number and return the resulting context object. @param hostname: The hostname part of the URI. @type hostname: L{bytes} @param port: The port part of the URI. @type port: L{int} @return: A context factory. @rtype: L{IOpenSSLContextFactory} """ context = self._webContextFactory.getContext(hostname, port) return _ContextFactoryWithContext(context) @implementer(IBodyProducer) class FileBodyProducer(object): """ L{FileBodyProducer} produces bytes from an input file object incrementally and writes them to a consumer. Since file-like objects cannot be read from in an event-driven manner, L{FileBodyProducer} uses a L{Cooperator} instance to schedule reads from the file. This process is also paused and resumed based on notifications from the L{IConsumer} provider being written to. The file is closed after it has been read, or if the producer is stopped early. @ivar _inputFile: Any file-like object, bytes read from which will be written to a consumer. @ivar _cooperate: A method like L{Cooperator.cooperate} which is used to schedule all reads. 
@ivar _readSize: The number of bytes to read from C{_inputFile} at a time. """ def __init__(self, inputFile, cooperator=task, readSize=2 ** 16): self._inputFile = inputFile self._cooperate = cooperator.cooperate self._readSize = readSize self.length = self._determineLength(inputFile) def _determineLength(self, fObj): """ Determine how many bytes can be read out of C{fObj} (assuming it is not modified from this point on). If the determination cannot be made, return C{UNKNOWN_LENGTH}. """ try: seek = fObj.seek tell = fObj.tell except AttributeError: return UNKNOWN_LENGTH originalPosition = tell() seek(0, os.SEEK_END) end = tell() seek(originalPosition, os.SEEK_SET) return end - originalPosition def stopProducing(self): """ Permanently stop writing bytes from the file to the consumer by stopping the underlying L{CooperativeTask}. """ self._inputFile.close() self._task.stop() def startProducing(self, consumer): """ Start a cooperative task which will read bytes from the input file and write them to C{consumer}. Return a L{Deferred} which fires after all bytes have been written. @param consumer: Any L{IConsumer} provider """ self._task = self._cooperate(self._writeloop(consumer)) d = self._task.whenDone() def maybeStopped(reason): # IBodyProducer.startProducing's Deferred isn't support to fire if # stopProducing is called. reason.trap(task.TaskStopped) return defer.Deferred() d.addCallbacks(lambda ignored: None, maybeStopped) return d def _writeloop(self, consumer): """ Return an iterator which reads one chunk of bytes from the input file and writes them to the consumer for each time it is iterated. """ while True: bytes = self._inputFile.read(self._readSize) if not bytes: self._inputFile.close() break consumer.write(bytes) yield None def pauseProducing(self): """ Temporarily suspend copying bytes from the input file to the consumer by pausing the L{CooperativeTask} which drives that activity. 
""" self._task.pause() def resumeProducing(self): """ Undo the effects of a previous C{pauseProducing} and resume copying bytes to the consumer by resuming the L{CooperativeTask} which drives the write activity. """ self._task.resume() class _HTTP11ClientFactory(protocol.Factory): """ A factory for L{HTTP11ClientProtocol}, used by L{HTTPConnectionPool}. @ivar _quiescentCallback: The quiescent callback to be passed to protocol instances, used to return them to the connection pool. @ivar _metadata: Metadata about the low-level connection details, used to make the repr more useful. @since: 11.1 """ def __init__(self, quiescentCallback, metadata): self._quiescentCallback = quiescentCallback self._metadata = metadata def __repr__(self): return '_HTTP11ClientFactory({}, {})'.format( self._quiescentCallback, self._metadata) def buildProtocol(self, addr): return HTTP11ClientProtocol(self._quiescentCallback) class _RetryingHTTP11ClientProtocol(object): """ A wrapper for L{HTTP11ClientProtocol} that automatically retries requests. @ivar _clientProtocol: The underlying L{HTTP11ClientProtocol}. @ivar _newConnection: A callable that creates a new connection for a retry. """ def __init__(self, clientProtocol, newConnection): self._clientProtocol = clientProtocol self._newConnection = newConnection def _shouldRetry(self, method, exception, bodyProducer): """ Indicate whether request should be retried. Only returns C{True} if method is idempotent, no response was received, the reason for the failed request was not due to user-requested cancellation, and no body was sent. The latter requirement may be relaxed in the future, and PUT added to approved method list. @param method: The method of the request. 
@type method: L{bytes} """ if method not in (b"GET", b"HEAD", b"OPTIONS", b"DELETE", b"TRACE"): return False if not isinstance(exception, (RequestNotSent, RequestTransmissionFailed, ResponseNeverReceived)): return False if isinstance(exception, _WrapperException): for aFailure in exception.reasons: if aFailure.check(defer.CancelledError): return False if bodyProducer is not None: return False return True def request(self, request): """ Do a request, and retry once (with a new connection) if it fails in a retryable manner. @param request: A L{Request} instance that will be requested using the wrapped protocol. """ d = self._clientProtocol.request(request) def failed(reason): if self._shouldRetry(request.method, reason.value, request.bodyProducer): return self._newConnection().addCallback( lambda connection: connection.request(request)) else: return reason d.addErrback(failed) return d class HTTPConnectionPool(object): """ A pool of persistent HTTP connections. Features: - Cached connections will eventually time out. - Limits on maximum number of persistent connections. Connections are stored using keys, which should be chosen such that any connections stored under a given key can be used interchangeably. Failed requests done using previously cached connections will be retried once if they use an idempotent method (e.g. GET), in case the HTTP server timed them out. @ivar persistent: Boolean indicating whether connections should be persistent. Connections are persistent by default. @ivar maxPersistentPerHost: The maximum number of cached persistent connections for a C{host:port} destination. @type maxPersistentPerHost: C{int} @ivar cachedConnectionTimeout: Number of seconds a cached persistent connection will stay open before disconnecting. @ivar retryAutomatically: C{boolean} indicating whether idempotent requests should be retried once if no response was received. @ivar _factory: The factory used to connect to the proxy. 
@ivar _connections: Map (scheme, host, port) to lists of L{HTTP11ClientProtocol} instances. @ivar _timeouts: Map L{HTTP11ClientProtocol} instances to a C{IDelayedCall} instance of their timeout. @since: 12.1 """ _factory = _HTTP11ClientFactory maxPersistentPerHost = 2 cachedConnectionTimeout = 240 retryAutomatically = True _log = Logger() def __init__(self, reactor, persistent=True): self._reactor = reactor self.persistent = persistent self._connections = {} self._timeouts = {} def getConnection(self, key, endpoint): """ Supply a connection, newly created or retrieved from the pool, to be used for one HTTP request. The connection will remain out of the pool (not available to be returned from future calls to this method) until one HTTP request has been completed over it. Afterwards, if the connection is still open, it will automatically be added to the pool. @param key: A unique key identifying connections that can be used interchangeably. @param endpoint: An endpoint that can be used to open a new connection if no cached connection is available. @return: A C{Deferred} that will fire with a L{HTTP11ClientProtocol} (or a wrapper) that can be used to send a single HTTP request. """ # Try to get cached version: connections = self._connections.get(key) while connections: connection = connections.pop(0) # Cancel timeout: self._timeouts[connection].cancel() del self._timeouts[connection] if connection.state == "QUIESCENT": if self.retryAutomatically: newConnection = lambda: self._newConnection(key, endpoint) connection = _RetryingHTTP11ClientProtocol( connection, newConnection) return defer.succeed(connection) return self._newConnection(key, endpoint) def _newConnection(self, key, endpoint): """ Create a new connection. This implements the new connection code path for L{getConnection}. 
""" def quiescentCallback(protocol): self._putConnection(key, protocol) factory = self._factory(quiescentCallback, repr(endpoint)) return endpoint.connect(factory) def _removeConnection(self, key, connection): """ Remove a connection from the cache and disconnect it. """ connection.transport.loseConnection() self._connections[key].remove(connection) del self._timeouts[connection] def _putConnection(self, key, connection): """ Return a persistent connection to the pool. This will be called by L{HTTP11ClientProtocol} when the connection becomes quiescent. """ if connection.state != "QUIESCENT": # Log with traceback for debugging purposes: try: raise RuntimeError( "BUG: Non-quiescent protocol added to connection pool.") except: self._log.failure( "BUG: Non-quiescent protocol added to connection pool.") return connections = self._connections.setdefault(key, []) if len(connections) == self.maxPersistentPerHost: dropped = connections.pop(0) dropped.transport.loseConnection() self._timeouts[dropped].cancel() del self._timeouts[dropped] connections.append(connection) cid = self._reactor.callLater(self.cachedConnectionTimeout, self._removeConnection, key, connection) self._timeouts[connection] = cid def closeCachedConnections(self): """ Close all persistent connections and remove them from the pool. @return: L{defer.Deferred} that fires when all connections have been closed. """ results = [] for protocols in itervalues(self._connections): for p in protocols: results.append(p.abort()) self._connections = {} for dc in itervalues(self._timeouts): dc.cancel() self._timeouts = {} return defer.gatherResults(results).addCallback(lambda ign: None) class _AgentBase(object): """ Base class offering common facilities for L{Agent}-type classes. @ivar _reactor: The C{IReactorTime} implementation which will be used by the pool, and perhaps by subclasses as well. @ivar _pool: The L{HTTPConnectionPool} used to manage HTTP connections. 
""" def __init__(self, reactor, pool): if pool is None: pool = HTTPConnectionPool(reactor, False) self._reactor = reactor self._pool = pool def _computeHostValue(self, scheme, host, port): """ Compute the string to use for the value of the I{Host} header, based on the given scheme, host name, and port number. """ if (isIPv6Address(nativeString(host))): host = b'[' + host + b']' if (scheme, port) in ((b'http', 80), (b'https', 443)): return host return host + b":" + intToBytes(port) def _requestWithEndpoint(self, key, endpoint, method, parsedURI, headers, bodyProducer, requestPath): """ Issue a new request, given the endpoint and the path sent as part of the request. """ if not isinstance(method, bytes): raise TypeError('method={!r} is {}, but must be bytes'.format( method, type(method))) method = _ensureValidMethod(method) # Create minimal headers, if necessary: if headers is None: headers = Headers() if not headers.hasHeader(b'host'): headers = headers.copy() headers.addRawHeader( b'host', self._computeHostValue(parsedURI.scheme, parsedURI.host, parsedURI.port)) d = self._pool.getConnection(key, endpoint) def cbConnected(proto): return proto.request( Request._construct(method, requestPath, headers, bodyProducer, persistent=self._pool.persistent, parsedURI=parsedURI)) d.addCallback(cbConnected) return d @implementer(IAgentEndpointFactory) class _StandardEndpointFactory(object): """ Standard HTTP endpoint destinations - TCP for HTTP, TCP+TLS for HTTPS. @ivar _policyForHTTPS: A web context factory which will be used to create SSL context objects for any SSL connections the agent needs to make. @ivar _connectTimeout: If not L{None}, the timeout passed to L{HostnameEndpoint} for specifying the connection timeout. @ivar _bindAddress: If not L{None}, the address passed to L{HostnameEndpoint} for specifying the local address to bind to. """ def __init__(self, reactor, contextFactory, connectTimeout, bindAddress): """ @param reactor: A provider to use to create endpoints. 
@type reactor: see L{HostnameEndpoint.__init__} for acceptable reactor types. @param contextFactory: A factory for TLS contexts, to control the verification parameters of OpenSSL. @type contextFactory: L{IPolicyForHTTPS}. @param connectTimeout: The amount of time that this L{Agent} will wait for the peer to accept a connection. @type connectTimeout: L{float} or L{None} @param bindAddress: The local address for client sockets to bind to. @type bindAddress: L{bytes} or L{None} """ self._reactor = reactor self._policyForHTTPS = contextFactory self._connectTimeout = connectTimeout self._bindAddress = bindAddress def endpointForURI(self, uri): """ Connect directly over TCP for C{b'http'} scheme, and TLS for C{b'https'}. @param uri: L{URI} to connect to. @return: Endpoint to connect to. @rtype: L{IStreamClientEndpoint} """ kwargs = {} if self._connectTimeout is not None: kwargs['timeout'] = self._connectTimeout kwargs['bindAddress'] = self._bindAddress try: host = nativeString(uri.host) except UnicodeDecodeError: raise ValueError(("The host of the provided URI ({uri.host!r}) " "contains non-ASCII octets, it should be ASCII " "decodable.").format(uri=uri)) endpoint = HostnameEndpoint(self._reactor, host, uri.port, **kwargs) if uri.scheme == b'http': return endpoint elif uri.scheme == b'https': connectionCreator = self._policyForHTTPS.creatorForNetloc(uri.host, uri.port) return wrapClientTLS(connectionCreator, endpoint) else: raise SchemeNotSupported("Unsupported scheme: %r" % (uri.scheme,)) @implementer(IAgent) class Agent(_AgentBase): """ L{Agent} is a very basic HTTP client. It supports I{HTTP} and I{HTTPS} scheme URIs. @ivar _pool: An L{HTTPConnectionPool} instance. @ivar _endpointFactory: The L{IAgentEndpointFactory} which will be used to create endpoints for outgoing connections. @since: 9.0 """ def __init__(self, reactor, contextFactory=BrowserLikePolicyForHTTPS(), connectTimeout=None, bindAddress=None, pool=None): """ Create an L{Agent}. 
@param reactor: A reactor for this L{Agent} to place outgoing connections. @type reactor: see L{HostnameEndpoint.__init__} for acceptable reactor types. @param contextFactory: A factory for TLS contexts, to control the verification parameters of OpenSSL. The default is to use a L{BrowserLikePolicyForHTTPS}, so unless you have special requirements you can leave this as-is. @type contextFactory: L{IPolicyForHTTPS}. @param connectTimeout: The amount of time that this L{Agent} will wait for the peer to accept a connection. @type connectTimeout: L{float} @param bindAddress: The local address for client sockets to bind to. @type bindAddress: L{bytes} @param pool: An L{HTTPConnectionPool} instance, or L{None}, in which case a non-persistent L{HTTPConnectionPool} instance will be created. @type pool: L{HTTPConnectionPool} """ if not IPolicyForHTTPS.providedBy(contextFactory): warnings.warn( repr(contextFactory) + " was passed as the HTTPS policy for an Agent, but it does " "not provide IPolicyForHTTPS. Since Twisted 14.0, you must " "pass a provider of IPolicyForHTTPS.", stacklevel=2, category=DeprecationWarning ) contextFactory = _DeprecatedToCurrentPolicyForHTTPS(contextFactory) endpointFactory = _StandardEndpointFactory( reactor, contextFactory, connectTimeout, bindAddress) self._init(reactor, endpointFactory, pool) @classmethod def usingEndpointFactory(cls, reactor, endpointFactory, pool=None): """ Create a new L{Agent} that will use the endpoint factory to figure out how to connect to the server. @param reactor: A reactor for this L{Agent} to place outgoing connections. @type reactor: see L{HostnameEndpoint.__init__} for acceptable reactor types. @param endpointFactory: Used to construct endpoints which the HTTP client will connect with. @type endpointFactory: an L{IAgentEndpointFactory} provider. @param pool: An L{HTTPConnectionPool} instance, or L{None}, in which case a non-persistent L{HTTPConnectionPool} instance will be created. 
@type pool: L{HTTPConnectionPool} @return: A new L{Agent}. """ agent = cls.__new__(cls) agent._init(reactor, endpointFactory, pool) return agent def _init(self, reactor, endpointFactory, pool): """ Initialize a new L{Agent}. @param reactor: A reactor for this L{Agent} to place outgoing connections. @type reactor: see L{HostnameEndpoint.__init__} for acceptable reactor types. @param endpointFactory: Used to construct endpoints which the HTTP client will connect with. @type endpointFactory: an L{IAgentEndpointFactory} provider. @param pool: An L{HTTPConnectionPool} instance, or L{None}, in which case a non-persistent L{HTTPConnectionPool} instance will be created. @type pool: L{HTTPConnectionPool} @return: A new L{Agent}. """ _AgentBase.__init__(self, reactor, pool) self._endpointFactory = endpointFactory def _getEndpoint(self, uri): """ Get an endpoint for the given URI, using C{self._endpointFactory}. @param uri: The URI of the request. @type uri: L{URI} @return: An endpoint which can be used to connect to given address. """ return self._endpointFactory.endpointForURI(uri) def request(self, method, uri, headers=None, bodyProducer=None): """ Issue a request to the server indicated by the given C{uri}. An existing connection from the connection pool may be used or a new one may be created. I{HTTP} and I{HTTPS} schemes are supported in C{uri}. @see: L{twisted.web.iweb.IAgent.request} """ uri = _ensureValidURI(uri.strip()) parsedURI = URI.fromBytes(uri) try: endpoint = self._getEndpoint(parsedURI) except SchemeNotSupported: return defer.fail(Failure()) key = (parsedURI.scheme, parsedURI.host, parsedURI.port) return self._requestWithEndpoint(key, endpoint, method, parsedURI, headers, bodyProducer, parsedURI.originForm) @implementer(IAgent) class ProxyAgent(_AgentBase): """ An HTTP agent able to cross HTTP proxies. @ivar _proxyEndpoint: The endpoint used to connect to the proxy. 
@since: 11.1 """ def __init__(self, endpoint, reactor=None, pool=None): if reactor is None: from twisted.internet import reactor _AgentBase.__init__(self, reactor, pool) self._proxyEndpoint = endpoint def request(self, method, uri, headers=None, bodyProducer=None): """ Issue a new request via the configured proxy. """ uri = _ensureValidURI(uri.strip()) # Cache *all* connections under the same key, since we are only # connecting to a single destination, the proxy: key = ("http-proxy", self._proxyEndpoint) # To support proxying HTTPS via CONNECT, we will use key # ("http-proxy-CONNECT", scheme, host, port), and an endpoint that # wraps _proxyEndpoint with an additional callback to do the CONNECT. return self._requestWithEndpoint(key, self._proxyEndpoint, method, URI.fromBytes(uri), headers, bodyProducer, uri) class _FakeUrllib2Request(object): """ A fake C{urllib2.Request} object for C{cookielib} to work with. @see: U{http://docs.python.org/library/urllib2.html#request-objects} @type uri: native L{str} @ivar uri: Request URI. @type headers: L{twisted.web.http_headers.Headers} @ivar headers: Request headers. @type type: native L{str} @ivar type: The scheme of the URI. @type host: native L{str} @ivar host: The host[:port] of the URI. @since: 11.1 """ def __init__(self, uri): """ Create a fake Urllib2 request. @param uri: Request URI. @type uri: L{bytes} """ self.uri = nativeString(uri) self.headers = Headers() _uri = URI.fromBytes(uri) self.type = nativeString(_uri.scheme) self.host = nativeString(_uri.host) if (_uri.scheme, _uri.port) not in ((b'http', 80), (b'https', 443)): # If it's not a schema on the regular port, add the port. 
self.host += ":" + str(_uri.port) if _PY3: self.origin_req_host = nativeString(_uri.host) self.unverifiable = lambda _: False def has_header(self, header): return self.headers.hasHeader(networkString(header)) def add_unredirected_header(self, name, value): self.headers.addRawHeader(networkString(name), networkString(value)) def get_full_url(self): return self.uri def get_header(self, name, default=None): headers = self.headers.getRawHeaders(networkString(name), default) if headers is not None: headers = [nativeString(x) for x in headers] return headers[0] return None def get_host(self): return self.host def get_type(self): return self.type def is_unverifiable(self): # In theory this shouldn't be hardcoded. return False class _FakeUrllib2Response(object): """ A fake C{urllib2.Response} object for C{cookielib} to work with. @type response: C{twisted.web.iweb.IResponse} @ivar response: Underlying Twisted Web response. @since: 11.1 """ def __init__(self, response): self.response = response def info(self): class _Meta(object): def getheaders(zelf, name): # PY2 headers = self.response.headers.getRawHeaders(name, []) return headers def get_all(zelf, name, default): # PY3 headers = self.response.headers.getRawHeaders( networkString(name), default) h = [nativeString(x) for x in headers] return h return _Meta() @implementer(IAgent) class CookieAgent(object): """ L{CookieAgent} extends the basic L{Agent} to add RFC-compliant handling of HTTP cookies. Cookies are written to and extracted from a C{cookielib.CookieJar} instance. The same cookie jar instance will be used for any requests through this agent, mutating it whenever a I{Set-Cookie} header appears in a response. @type _agent: L{twisted.web.client.Agent} @ivar _agent: Underlying Twisted Web agent to issue requests through. @type cookieJar: C{cookielib.CookieJar} @ivar cookieJar: Initialized cookie jar to read cookies from and store cookies to. 
@since: 11.1 """ def __init__(self, agent, cookieJar): self._agent = agent self.cookieJar = cookieJar def request(self, method, uri, headers=None, bodyProducer=None): """ Issue a new request to the wrapped L{Agent}. Send a I{Cookie} header if a cookie for C{uri} is stored in L{CookieAgent.cookieJar}. Cookies are automatically extracted and stored from requests. If a C{'cookie'} header appears in C{headers} it will override the automatic cookie header obtained from the cookie jar. @see: L{Agent.request} """ if headers is None: headers = Headers() lastRequest = _FakeUrllib2Request(uri) # Setting a cookie header explicitly will disable automatic request # cookies. if not headers.hasHeader(b'cookie'): self.cookieJar.add_cookie_header(lastRequest) cookieHeader = lastRequest.get_header('Cookie', None) if cookieHeader is not None: headers = headers.copy() headers.addRawHeader(b'cookie', networkString(cookieHeader)) d = self._agent.request(method, uri, headers, bodyProducer) d.addCallback(self._extractCookies, lastRequest) return d def _extractCookies(self, response, request): """ Extract response cookies and store them in the cookie jar. @type response: L{twisted.web.iweb.IResponse} @param response: Twisted Web response. @param request: A urllib2 compatible request object. """ resp = _FakeUrllib2Response(response) self.cookieJar.extract_cookies(resp, request) return response class GzipDecoder(proxyForInterface(IResponse)): """ A wrapper for a L{Response} instance which handles gzip'ed body. @ivar original: The original L{Response} object. @since: 11.1 """ def __init__(self, response): self.original = response self.length = UNKNOWN_LENGTH def deliverBody(self, protocol): """ Override C{deliverBody} to wrap the given C{protocol} with L{_GzipProtocol}. """ self.original.deliverBody(_GzipProtocol(protocol, self.original)) class _GzipProtocol(proxyForInterface(IProtocol)): """ A L{Protocol} implementation which wraps another one, transparently decompressing received data. 
@ivar _zlibDecompress: A zlib decompress object used to decompress the data stream. @ivar _response: A reference to the original response, in case of errors. @since: 11.1 """ def __init__(self, protocol, response): self.original = protocol self._response = response self._zlibDecompress = zlib.decompressobj(16 + zlib.MAX_WBITS) def dataReceived(self, data): """ Decompress C{data} with the zlib decompressor, forwarding the raw data to the original protocol. """ try: rawData = self._zlibDecompress.decompress(data) except zlib.error: raise ResponseFailed([Failure()], self._response) if rawData: self.original.dataReceived(rawData) def connectionLost(self, reason): """ Forward the connection lost event, flushing remaining data from the decompressor if any. """ try: rawData = self._zlibDecompress.flush() except zlib.error: raise ResponseFailed([reason, Failure()], self._response) if rawData: self.original.dataReceived(rawData) self.original.connectionLost(reason) @implementer(IAgent) class ContentDecoderAgent(object): """ An L{Agent} wrapper to handle encoded content. It takes care of declaring the support for content in the I{Accept-Encoding} header, and automatically decompresses the received data if it's effectively using compression. @param decoders: A list or tuple of (name, decoder) objects. The name declares which decoding the decoder supports, and the decoder must return a response object when called/instantiated. For example, C{(('gzip', GzipDecoder))}. The order determines how the decoders are going to be advertized to the server. @since: 11.1 """ def __init__(self, agent, decoders): self._agent = agent self._decoders = dict(decoders) self._supported = b','.join([decoder[0] for decoder in decoders]) def request(self, method, uri, headers=None, bodyProducer=None): """ Send a client request which declares supporting compressed content. @see: L{Agent.request}. 
""" if headers is None: headers = Headers() else: headers = headers.copy() headers.addRawHeader(b'accept-encoding', self._supported) deferred = self._agent.request(method, uri, headers, bodyProducer) return deferred.addCallback(self._handleResponse) def _handleResponse(self, response): """ Check if the response is encoded, and wrap it to handle decompression. """ contentEncodingHeaders = response.headers.getRawHeaders( b'content-encoding', []) contentEncodingHeaders = b','.join(contentEncodingHeaders).split(b',') while contentEncodingHeaders: name = contentEncodingHeaders.pop().strip() decoder = self._decoders.get(name) if decoder is not None: response = decoder(response) else: # Add it back contentEncodingHeaders.append(name) break if contentEncodingHeaders: response.headers.setRawHeaders( b'content-encoding', [b','.join(contentEncodingHeaders)]) else: response.headers.removeHeader(b'content-encoding') return response @implementer(IAgent) class RedirectAgent(object): """ An L{Agent} wrapper which handles HTTP redirects. The implementation is rather strict: 301 and 302 behaves like 307, not redirecting automatically on methods different from I{GET} and I{HEAD}. See L{BrowserLikeRedirectAgent} for a redirecting Agent that behaves more like a web browser. @param redirectLimit: The maximum number of times the agent is allowed to follow redirects before failing with a L{error.InfiniteRedirection}. @cvar _redirectResponses: A L{list} of HTTP status codes to be redirected for I{GET} and I{HEAD} methods. @cvar _seeOtherResponses: A L{list} of HTTP status codes to be redirected for any method and the method altered to I{GET}. 
@since: 11.1 """ _redirectResponses = [http.MOVED_PERMANENTLY, http.FOUND, http.TEMPORARY_REDIRECT] _seeOtherResponses = [http.SEE_OTHER] def __init__(self, agent, redirectLimit=20): self._agent = agent self._redirectLimit = redirectLimit def request(self, method, uri, headers=None, bodyProducer=None): """ Send a client request following HTTP redirects. @see: L{Agent.request}. """ deferred = self._agent.request(method, uri, headers, bodyProducer) return deferred.addCallback( self._handleResponse, method, uri, headers, 0) def _resolveLocation(self, requestURI, location): """ Resolve the redirect location against the request I{URI}. @type requestURI: C{bytes} @param requestURI: The request I{URI}. @type location: C{bytes} @param location: The redirect location. @rtype: C{bytes} @return: Final resolved I{URI}. """ return _urljoin(requestURI, location) def _handleRedirect(self, response, method, uri, headers, redirectCount): """ Handle a redirect response, checking the number of redirects already followed, and extracting the location header fields. """ if redirectCount >= self._redirectLimit: err = error.InfiniteRedirection( response.code, b'Infinite redirection detected', location=uri) raise ResponseFailed([Failure(err)], response) locationHeaders = response.headers.getRawHeaders(b'location', []) if not locationHeaders: err = error.RedirectWithNoLocation( response.code, b'No location header field', uri) raise ResponseFailed([Failure(err)], response) location = self._resolveLocation(uri, locationHeaders[0]) deferred = self._agent.request(method, location, headers) def _chainResponse(newResponse): newResponse.setPreviousResponse(response) return newResponse deferred.addCallback(_chainResponse) return deferred.addCallback( self._handleResponse, method, uri, headers, redirectCount + 1) def _handleResponse(self, response, method, uri, headers, redirectCount): """ Handle the response, making another request if it indicates a redirect. 
""" if response.code in self._redirectResponses: if method not in (b'GET', b'HEAD'): err = error.PageRedirect(response.code, location=uri) raise ResponseFailed([Failure(err)], response) return self._handleRedirect(response, method, uri, headers, redirectCount) elif response.code in self._seeOtherResponses: return self._handleRedirect(response, b'GET', uri, headers, redirectCount) return response class BrowserLikeRedirectAgent(RedirectAgent): """ An L{Agent} wrapper which handles HTTP redirects in the same fashion as web browsers. Unlike L{RedirectAgent}, the implementation is more relaxed: 301 and 302 behave like 303, redirecting automatically on any method and altering the redirect request to a I{GET}. @see: L{RedirectAgent} @since: 13.1 """ _redirectResponses = [http.TEMPORARY_REDIRECT] _seeOtherResponses = [http.MOVED_PERMANENTLY, http.FOUND, http.SEE_OTHER] class _ReadBodyProtocol(protocol.Protocol): """ Protocol that collects data sent to it. This is a helper for L{IResponse.deliverBody}, which collects the body and fires a deferred with it. @ivar deferred: See L{__init__}. @ivar status: See L{__init__}. @ivar message: See L{__init__}. @ivar dataBuffer: list of byte-strings received @type dataBuffer: L{list} of L{bytes} """ def __init__(self, status, message, deferred): """ @param status: Status of L{IResponse} @ivar status: L{int} @param message: Message of L{IResponse} @type message: L{bytes} @param deferred: deferred to fire when response is complete @type deferred: L{Deferred} firing with L{bytes} """ self.deferred = deferred self.status = status self.message = message self.dataBuffer = [] def dataReceived(self, data): """ Accumulate some more bytes from the response. """ self.dataBuffer.append(data) def connectionLost(self, reason): """ Deliver the accumulated response bytes to the waiting L{Deferred}, if the response body has been completely received without error. 
""" if reason.check(ResponseDone): self.deferred.callback(b''.join(self.dataBuffer)) elif reason.check(PotentialDataLoss): self.deferred.errback( PartialDownloadError(self.status, self.message, b''.join(self.dataBuffer))) else: self.deferred.errback(reason) def readBody(response): """ Get the body of an L{IResponse} and return it as a byte string. This is a helper function for clients that don't want to incrementally receive the body of an HTTP response. @param response: The HTTP response for which the body will be read. @type response: L{IResponse} provider @return: A L{Deferred} which will fire with the body of the response. Cancelling it will close the connection to the server immediately. """ def cancel(deferred): """ Cancel a L{readBody} call, close the connection to the HTTP server immediately, if it is still open. @param deferred: The cancelled L{defer.Deferred}. """ abort = getAbort() if abort is not None: abort() d = defer.Deferred(cancel) protocol = _ReadBodyProtocol(response.code, response.phrase, d) def getAbort(): return getattr(protocol.transport, 'abortConnection', None) response.deliverBody(protocol) if protocol.transport is not None and getAbort() is None: warnings.warn( 'Using readBody with a transport that does not have an ' 'abortConnection method', category=DeprecationWarning, stacklevel=2) return d __all__ = [ 'Agent', 'BrowserLikeRedirectAgent', 'ContentDecoderAgent', 'CookieAgent', 'downloadPage', 'getPage', 'GzipDecoder', 'HTTPClientFactory', 'HTTPConnectionPool', 'HTTPDownloader', 'HTTPPageDownloader', 'HTTPPageGetter', 'PartialDownloadError', 'ProxyAgent', 'readBody', 'RedirectAgent', 'RequestGenerationFailed', 'RequestTransmissionFailed', 'Response', 'ResponseDone', 'ResponseFailed', 'ResponseNeverReceived', 'URI', ]
./CrossVul/dataset_final_sorted/CWE-20/py/good_872_1
crossvul-python_data_good_1800_0
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2014-2015 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale.  If not, see <http://www.gnu.org/licenses/>.

"""
Multi files per calendar filesystem storage backend.

"""

import os
import shutil
import time
import sys

from . import filesystem
from .. import ical
from .. import log
from .. import pathutils


class Collection(filesystem.Collection):
    """Collection stored as one file per component inside one directory."""

    def _create_dirs(self):
        """Create the collection directory if it does not exist yet."""
        if not os.path.exists(self._filesystem_path):
            os.makedirs(self._filesystem_path)

    @property
    def headers(self):
        """iCalendar headers written at the top of every component file."""
        return (
            ical.Header("PRODID:-//Radicale//NONSGML Radicale Server//EN"),
            ical.Header("VERSION:%s" % self.version))

    def write(self):
        """Write every component of the collection to its own file.

        Components whose name cannot be safely mapped to a filesystem path
        component are skipped (and logged) instead of being written, to
        prevent path traversal through crafted component names.
        """
        self._create_dirs()
        for component in self.components:
            text = ical.serialize(
                self.tag, self.headers, [component] + self.timezones)
            # On Python 2 filenames must be bytes in the filesystem encoding.
            name = (
                component.name if sys.version_info[0] >= 3 else
                component.name.encode(filesystem.FILESYSTEM_ENCODING))
            if not pathutils.is_safe_filesystem_path_component(name):
                # Typo fix in the log message ("tranlate" -> "translate").
                log.LOGGER.debug(
                    "Can't translate name safely to filesystem, "
                    "skipping component: %s", name)
                continue
            filesystem_path = os.path.join(self._filesystem_path, name)
            with filesystem.open(filesystem_path, "w") as fd:
                fd.write(text)

    def delete(self):
        """Remove the collection directory and its ``.props`` file."""
        shutil.rmtree(self._filesystem_path)
        os.remove(self._props_path)

    def remove(self, name):
        """Remove the single component ``name`` from the collection.

        Unsafe names are ignored (and logged) for the same path-traversal
        reason as in ``write``.
        """
        if not pathutils.is_safe_filesystem_path_component(name):
            log.LOGGER.debug(
                "Can't translate name safely to filesystem, "
                "skipping component: %s", name)
            return
        filesystem_path = os.path.join(self._filesystem_path, name)
        if os.path.exists(filesystem_path):
            os.remove(filesystem_path)

    @property
    def text(self):
        """Return the whole collection serialized as one iCalendar text.

        Unreadable items are skipped with a warning instead of failing the
        whole collection (deliberate best-effort behavior).
        """
        components = (
            ical.Timezone, ical.Event, ical.Todo, ical.Journal, ical.Card)
        items = set()
        try:
            filenames = os.listdir(self._filesystem_path)
        except (OSError, IOError) as e:
            log.LOGGER.info(
                'Error while reading collection %r: %r' % (
                    self._filesystem_path, e))
            return ""

        for filename in filenames:
            path = os.path.join(self._filesystem_path, filename)
            try:
                with filesystem.open(path) as fd:
                    items.update(self._parse(fd.read(), components))
            except (OSError, IOError) as e:
                log.LOGGER.warning(
                    'Error while reading item %r: %r' % (path, e))

        return ical.serialize(
            self.tag, self.headers, sorted(items, key=lambda x: x.name))

    @classmethod
    def is_node(cls, path):
        """True if ``path`` maps to a directory without a ``.props`` file."""
        filesystem_path = pathutils.path_to_filesystem(path, filesystem.FOLDER)
        return (
            os.path.isdir(filesystem_path) and
            not os.path.exists(filesystem_path + ".props"))

    @classmethod
    def is_leaf(cls, path):
        """True if ``path`` maps to a directory with a ``.props`` file."""
        filesystem_path = pathutils.path_to_filesystem(path, filesystem.FOLDER)
        # Bug fix: the existence check must use the translated
        # ``filesystem_path`` (as ``is_node`` does), not the raw URL
        # ``path`` — the raw path is not a filesystem location.
        return (
            os.path.isdir(filesystem_path) and
            os.path.exists(filesystem_path + ".props"))

    @property
    def last_modified(self):
        """RFC 1123-style timestamp (UTC) of the newest item, or epoch if empty."""
        last = max([
            os.path.getmtime(os.path.join(self._filesystem_path, filename))
            for filename in os.listdir(self._filesystem_path)] or [0])
        return time.strftime(
            "%a, %d %b %Y %H:%M:%S +0000", time.gmtime(last))
./CrossVul/dataset_final_sorted/CWE-20/py/good_1800_0
crossvul-python_data_good_100_0
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains constants from the specification."""

# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1


class Membership(object):
    """Represents the membership states of a user in a room."""
    INVITE = u"invite"
    JOIN = u"join"
    KNOCK = u"knock"
    LEAVE = u"leave"
    BAN = u"ban"
    # All known membership values, for iteration/validation.
    LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)


class PresenceState(object):
    """Represents the presence state of a user."""
    OFFLINE = u"offline"
    UNAVAILABLE = u"unavailable"
    ONLINE = u"online"


class JoinRules(object):
    """Join-rule values (used as the content of m.room.join_rules events)."""
    PUBLIC = u"public"
    KNOCK = u"knock"
    INVITE = u"invite"
    PRIVATE = u"private"


class LoginType(object):
    """Authentication stage type identifiers."""
    PASSWORD = u"m.login.password"
    EMAIL_IDENTITY = u"m.login.email.identity"
    MSISDN = u"m.login.msisdn"
    RECAPTCHA = u"m.login.recaptcha"
    DUMMY = u"m.login.dummy"

    # Only for C/S API v1
    APPLICATION_SERVICE = u"m.login.application_service"
    SHARED_SECRET = u"org.matrix.login.shared_secret"


class EventTypes(object):
    """Well-known event type identifiers."""
    Member = "m.room.member"
    Create = "m.room.create"
    JoinRules = "m.room.join_rules"
    PowerLevels = "m.room.power_levels"
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"

    # These are used for validation
    Message = "m.room.message"
    Topic = "m.room.topic"
    Name = "m.room.name"


class RejectedReason(object):
    """Reasons an event can be marked rejected."""
    AUTH_ERROR = "auth_error"
    REPLACED = "replaced"
    NOT_ANCESTOR = "not_ancestor"


class RoomCreationPreset(object):
    """Preset names accepted by the room-creation API."""
    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"


class ThirdPartyEntityKind(object):
    """Kinds of third-party entities that can be looked up."""
    USER = "user"
    LOCATION = "location"
./CrossVul/dataset_final_sorted/CWE-20/py/good_100_0
crossvul-python_data_good_2141_0
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

# NOTE: this module is Python 2 code (`except OSError, e`, `iteritems`).

import os
import subprocess
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import utils
from ansible import errors
import sys


class InventoryScript(object):
    ''' Host inventory parser for ansible using external inventory scripts. '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        """Execute the inventory script with --list and parse its JSON output.

        Raises AnsibleError if the script cannot be run or its output
        cannot be parsed as inventory data.
        """

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        # Argument list form (no shell=True), so the filename is never
        # interpreted by a shell.
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        self.data = stdout
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)

    def _parse(self, err):
        """Turn the script's JSON output into a dict of group name -> Group.

        ``err`` is the script's captured stderr, echoed back to the user
        when parsing fails.
        """

        all_hosts = {}
        self.raw  = utils.parse_json(self.data, from_remote=True)
        all       = Group('all')
        groups    = dict(all=all)
        group     = None

        if 'failed' in self.raw:
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None

            # Normalize shorthand group entries: a bare list means
            # {'hosts': [...]}, a bare dict of variables means vars for a
            # group containing just the group-named host.
            if not isinstance(data, dict):
                data = {'hosts': data}
            elif not any(k in data for k in ('hosts','vars')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                for hostname in data['hosts']:
                    # Deduplicate Host objects across groups.
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                for k, v in data['vars'].iteritems():
                    if group.name == all.name:
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)

            if group.name != all.name:
                all.add_child_group(group)

        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])

        return groups

    def get_host_variables(self, host):
        """ Runs <script> --host <hostname> to determine additional host variables """
        # Prefer the hostvars returned under "_meta" by --list, if the
        # script provided them; avoids one subprocess call per host.
        if self.host_vars_from_top is not None:
            got = self.host_vars_from_top.get(host.name, {})
            return got

        cmd = [self.filename, "--host", host.name]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (out, err) = sp.communicate()
        return utils.parse_json(out)
./CrossVul/dataset_final_sorted/CWE-20/py/good_2141_0
crossvul-python_data_good_3523_0
import datetime from StringIO import StringIO from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.serializers import json from django.utils import simplejson from django.utils.encoding import force_unicode from tastypie.bundle import Bundle from tastypie.exceptions import UnsupportedFormat from tastypie.utils import format_datetime, format_date, format_time try: import lxml from lxml.etree import parse as parse_xml from lxml.etree import Element, tostring except ImportError: lxml = None try: import yaml from django.core.serializers import pyyaml except ImportError: yaml = None try: import biplist except ImportError: biplist = None class Serializer(object): """ A swappable class for serialization. This handles most types of data as well as the following output formats:: * json * jsonp * xml * yaml * html * plist (see http://explorapp.com/biplist/) It was designed to make changing behavior easy, either by overridding the various format methods (i.e. ``to_json``), by changing the ``formats/content_types`` options or by altering the other hook methods. """ formats = ['json', 'jsonp', 'xml', 'yaml', 'html', 'plist'] content_types = { 'json': 'application/json', 'jsonp': 'text/javascript', 'xml': 'application/xml', 'yaml': 'text/yaml', 'html': 'text/html', 'plist': 'application/x-plist', } def __init__(self, formats=None, content_types=None, datetime_formatting=None): self.supported_formats = [] self.datetime_formatting = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601') if formats is not None: self.formats = formats if content_types is not None: self.content_types = content_types if datetime_formatting is not None: self.datetime_formatting = datetime_formatting for format in self.formats: try: self.supported_formats.append(self.content_types[format]) except KeyError: raise ImproperlyConfigured("Content type for specified type '%s' not found. 
Please provide it at either the class level or via the arguments." % format) def get_mime_for_format(self, format): """ Given a format, attempts to determine the correct MIME type. If not available on the current ``Serializer``, returns ``application/json`` by default. """ try: return self.content_types[format] except KeyError: return 'application/json' def format_datetime(self, data): """ A hook to control how datetimes are formatted. Can be overridden at the ``Serializer`` level (``datetime_formatting``) or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``). Default is ``iso-8601``, which looks like "2010-12-16T03:02:14". """ if self.datetime_formatting == 'rfc-2822': return format_datetime(data) return data.isoformat() def format_date(self, data): """ A hook to control how dates are formatted. Can be overridden at the ``Serializer`` level (``datetime_formatting``) or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``). Default is ``iso-8601``, which looks like "2010-12-16". """ if self.datetime_formatting == 'rfc-2822': return format_date(data) return data.isoformat() def format_time(self, data): """ A hook to control how times are formatted. Can be overridden at the ``Serializer`` level (``datetime_formatting``) or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``). Default is ``iso-8601``, which looks like "03:02:14". """ if self.datetime_formatting == 'rfc-2822': return format_time(data) return data.isoformat() def serialize(self, bundle, format='application/json', options={}): """ Given some data and a format, calls the correct method to serialize the data and returns the result. """ desired_format = None for short_format, long_format in self.content_types.items(): if format == long_format: if hasattr(self, "to_%s" % short_format): desired_format = short_format break if desired_format is None: raise UnsupportedFormat("The format indicated '%s' had no available serialization method. 
Please check your ``formats`` and ``content_types`` on your Serializer." % format) serialized = getattr(self, "to_%s" % desired_format)(bundle, options) return serialized def deserialize(self, content, format='application/json'): """ Given some data and a format, calls the correct method to deserialize the data and returns the result. """ desired_format = None format = format.split(';')[0] for short_format, long_format in self.content_types.items(): if format == long_format: if hasattr(self, "from_%s" % short_format): desired_format = short_format break if desired_format is None: raise UnsupportedFormat("The format indicated '%s' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format) deserialized = getattr(self, "from_%s" % desired_format)(content) return deserialized def to_simple(self, data, options): """ For a piece of data, attempts to recognize it and provide a simplified form of something complex. This brings complex Python data structures down to native types of the serialization format(s). 
""" if isinstance(data, (list, tuple)): return [self.to_simple(item, options) for item in data] if isinstance(data, dict): return dict((key, self.to_simple(val, options)) for (key, val) in data.iteritems()) elif isinstance(data, Bundle): return dict((key, self.to_simple(val, options)) for (key, val) in data.data.iteritems()) elif hasattr(data, 'dehydrated_type'): if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False: if data.full: return self.to_simple(data.fk_resource, options) else: return self.to_simple(data.value, options) elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True: if data.full: return [self.to_simple(bundle, options) for bundle in data.m2m_bundles] else: return [self.to_simple(val, options) for val in data.value] else: return self.to_simple(data.value, options) elif isinstance(data, datetime.datetime): return self.format_datetime(data) elif isinstance(data, datetime.date): return self.format_date(data) elif isinstance(data, datetime.time): return self.format_time(data) elif isinstance(data, bool): return data elif type(data) in (long, int, float): return data elif data is None: return None else: return force_unicode(data) def to_etree(self, data, options=None, name=None, depth=0): """ Given some data, converts that data to an ``etree.Element`` suitable for use in the XML output. 
""" if isinstance(data, (list, tuple)): element = Element(name or 'objects') if name: element = Element(name) element.set('type', 'list') else: element = Element('objects') for item in data: element.append(self.to_etree(item, options, depth=depth+1)) elif isinstance(data, dict): if depth == 0: element = Element(name or 'response') else: element = Element(name or 'object') element.set('type', 'hash') for (key, value) in data.iteritems(): element.append(self.to_etree(value, options, name=key, depth=depth+1)) elif isinstance(data, Bundle): element = Element(name or 'object') for field_name, field_object in data.data.items(): element.append(self.to_etree(field_object, options, name=field_name, depth=depth+1)) elif hasattr(data, 'dehydrated_type'): if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False: if data.full: return self.to_etree(data.fk_resource, options, name, depth+1) else: return self.to_etree(data.value, options, name, depth+1) elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True: if data.full: element = Element(name or 'objects') for bundle in data.m2m_bundles: element.append(self.to_etree(bundle, options, bundle.resource_name, depth+1)) else: element = Element(name or 'objects') for value in data.value: element.append(self.to_etree(value, options, name, depth=depth+1)) else: return self.to_etree(data.value, options, name) else: element = Element(name or 'value') simple_data = self.to_simple(data, options) data_type = get_type_string(simple_data) if data_type != 'string': element.set('type', get_type_string(simple_data)) if data_type != 'null': element.text = force_unicode(simple_data) return element def from_etree(self, data): """ Not the smartest deserializer on the planet. At the request level, it first tries to output the deserialized subelement called "object" or "objects" and falls back to deserializing based on hinted types in the XML element attribute "type". 
""" if data.tag == 'request': # if "object" or "objects" exists, return deserialized forms. elements = data.getchildren() for element in elements: if element.tag in ('object', 'objects'): return self.from_etree(element) return dict((element.tag, self.from_etree(element)) for element in elements) elif data.tag == 'object' or data.get('type') == 'hash': return dict((element.tag, self.from_etree(element)) for element in data.getchildren()) elif data.tag == 'objects' or data.get('type') == 'list': return [self.from_etree(element) for element in data.getchildren()] else: type_string = data.get('type') if type_string in ('string', None): return data.text elif type_string == 'integer': return int(data.text) elif type_string == 'float': return float(data.text) elif type_string == 'boolean': if data.text == 'True': return True else: return False else: return None def to_json(self, data, options=None): """ Given some Python data, produces JSON output. """ options = options or {} data = self.to_simple(data, options) return simplejson.dumps(data, cls=json.DjangoJSONEncoder, sort_keys=True) def from_json(self, content): """ Given some JSON data, returns a Python dictionary of the decoded data. """ return simplejson.loads(content) def to_jsonp(self, data, options=None): """ Given some Python data, produces JSON output wrapped in the provided callback. """ options = options or {} return '%s(%s)' % (options['callback'], self.to_json(data, options)) def to_xml(self, data, options=None): """ Given some Python data, produces XML output. """ options = options or {} if lxml is None: raise ImproperlyConfigured("Usage of the XML aspects requires lxml.") return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8') def from_xml(self, content): """ Given some XML data, returns a Python dictionary of the decoded data. 
""" if lxml is None: raise ImproperlyConfigured("Usage of the XML aspects requires lxml.") return self.from_etree(parse_xml(StringIO(content)).getroot()) def to_yaml(self, data, options=None): """ Given some Python data, produces YAML output. """ options = options or {} if yaml is None: raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.") return yaml.dump(self.to_simple(data, options)) def from_yaml(self, content): """ Given some YAML data, returns a Python dictionary of the decoded data. """ if yaml is None: raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.") return yaml.safe_load(content) def to_plist(self, data, options=None): """ Given some Python data, produces binary plist output. """ options = options or {} if biplist is None: raise ImproperlyConfigured("Usage of the plist aspects requires biplist.") return biplist.writePlistToString(self.to_simple(data, options)) def from_plist(self, content): """ Given some binary plist data, returns a Python dictionary of the decoded data. """ if biplist is None: raise ImproperlyConfigured("Usage of the plist aspects requires biplist.") return biplist.readPlistFromString(content) def to_html(self, data, options=None): """ Reserved for future usage. The desire is to provide HTML output of a resource, making an API available to a browser. This is on the TODO list but not currently implemented. """ options = options or {} return 'Sorry, not implemented yet. Please append "?format=json" to your URL.' def from_html(self, content): """ Reserved for future usage. The desire is to handle form-based (maybe Javascript?) input, making an API available to a browser. This is on the TODO list but not currently implemented. """ pass def get_type_string(data): """ Translates a Python data type into a string format. 
""" data_type = type(data) if data_type in (int, long): return 'integer' elif data_type == float: return 'float' elif data_type == bool: return 'boolean' elif data_type in (list, tuple): return 'list' elif data_type == dict: return 'hash' elif data is None: return 'null' elif isinstance(data, basestring): return 'string'
./CrossVul/dataset_final_sorted/CWE-20/py/good_3523_0
crossvul-python_data_good_2078_2
#!/usr/bin/env python from __future__ import division, absolute_import, print_function __all__ = ['run_main', 'compile', 'f2py_testing'] import os import sys import subprocess from . import f2py2e from . import f2py_testing from . import diagnose from .info import __doc__ run_main = f2py2e.run_main main = f2py2e.main def compile(source, modulename = 'untitled', extra_args = '', verbose = 1, source_fn = None ): ''' Build extension module from processing source with f2py. Read the source of this function for more information. ''' from numpy.distutils.exec_command import exec_command import tempfile if source_fn is None: f = tempfile.NamedTemporaryFile(suffix='.f') else: f = open(source_fn, 'w') try: f.write(source) f.flush() args = ' -c -m %s %s %s'%(modulename, f.name, extra_args) c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' % \ (sys.executable, args) s, o = exec_command(c) finally: f.close() return s from numpy.testing import Tester test = Tester().test bench = Tester().bench
./CrossVul/dataset_final_sorted/CWE-20/py/good_2078_2
crossvul-python_data_bad_117_3
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import abc import six from cryptography import utils @six.add_metaclass(abc.ABCMeta) class Mode(object): @abc.abstractproperty def name(self): """ A string naming this mode (e.g. "ECB", "CBC"). """ @abc.abstractmethod def validate_for_algorithm(self, algorithm): """ Checks that all the necessary invariants of this (mode, algorithm) combination are met. """ @six.add_metaclass(abc.ABCMeta) class ModeWithInitializationVector(object): @abc.abstractproperty def initialization_vector(self): """ The value of the initialization vector for this mode as bytes. """ @six.add_metaclass(abc.ABCMeta) class ModeWithTweak(object): @abc.abstractproperty def tweak(self): """ The value of the tweak for this mode as bytes. """ @six.add_metaclass(abc.ABCMeta) class ModeWithNonce(object): @abc.abstractproperty def nonce(self): """ The value of the nonce for this mode as bytes. """ @six.add_metaclass(abc.ABCMeta) class ModeWithAuthenticationTag(object): @abc.abstractproperty def tag(self): """ The value of the tag supplied to the constructor of this mode. 
""" def _check_aes_key_length(self, algorithm): if algorithm.key_size > 256 and algorithm.name == "AES": raise ValueError( "Only 128, 192, and 256 bit keys are allowed for this AES mode" ) def _check_iv_length(self, algorithm): if len(self.initialization_vector) * 8 != algorithm.block_size: raise ValueError("Invalid IV size ({0}) for {1}.".format( len(self.initialization_vector), self.name )) def _check_iv_and_key_length(self, algorithm): _check_aes_key_length(self, algorithm) _check_iv_length(self, algorithm) @utils.register_interface(Mode) @utils.register_interface(ModeWithInitializationVector) class CBC(object): name = "CBC" def __init__(self, initialization_vector): if not isinstance(initialization_vector, bytes): raise TypeError("initialization_vector must be bytes") self._initialization_vector = initialization_vector initialization_vector = utils.read_only_property("_initialization_vector") validate_for_algorithm = _check_iv_and_key_length @utils.register_interface(Mode) @utils.register_interface(ModeWithTweak) class XTS(object): name = "XTS" def __init__(self, tweak): if not isinstance(tweak, bytes): raise TypeError("tweak must be bytes") if len(tweak) != 16: raise ValueError("tweak must be 128-bits (16 bytes)") self._tweak = tweak tweak = utils.read_only_property("_tweak") def validate_for_algorithm(self, algorithm): if algorithm.key_size not in (256, 512): raise ValueError( "The XTS specification requires a 256-bit key for AES-128-XTS" " and 512-bit key for AES-256-XTS" ) @utils.register_interface(Mode) class ECB(object): name = "ECB" validate_for_algorithm = _check_aes_key_length @utils.register_interface(Mode) @utils.register_interface(ModeWithInitializationVector) class OFB(object): name = "OFB" def __init__(self, initialization_vector): if not isinstance(initialization_vector, bytes): raise TypeError("initialization_vector must be bytes") self._initialization_vector = initialization_vector initialization_vector = 
utils.read_only_property("_initialization_vector") validate_for_algorithm = _check_iv_and_key_length @utils.register_interface(Mode) @utils.register_interface(ModeWithInitializationVector) class CFB(object): name = "CFB" def __init__(self, initialization_vector): if not isinstance(initialization_vector, bytes): raise TypeError("initialization_vector must be bytes") self._initialization_vector = initialization_vector initialization_vector = utils.read_only_property("_initialization_vector") validate_for_algorithm = _check_iv_and_key_length @utils.register_interface(Mode) @utils.register_interface(ModeWithInitializationVector) class CFB8(object): name = "CFB8" def __init__(self, initialization_vector): if not isinstance(initialization_vector, bytes): raise TypeError("initialization_vector must be bytes") self._initialization_vector = initialization_vector initialization_vector = utils.read_only_property("_initialization_vector") validate_for_algorithm = _check_iv_and_key_length @utils.register_interface(Mode) @utils.register_interface(ModeWithNonce) class CTR(object): name = "CTR" def __init__(self, nonce): if not isinstance(nonce, bytes): raise TypeError("nonce must be bytes") self._nonce = nonce nonce = utils.read_only_property("_nonce") def validate_for_algorithm(self, algorithm): _check_aes_key_length(self, algorithm) if len(self.nonce) * 8 != algorithm.block_size: raise ValueError("Invalid nonce size ({0}) for {1}.".format( len(self.nonce), self.name )) @utils.register_interface(Mode) @utils.register_interface(ModeWithInitializationVector) @utils.register_interface(ModeWithAuthenticationTag) class GCM(object): name = "GCM" _MAX_ENCRYPTED_BYTES = (2 ** 39 - 256) // 8 _MAX_AAD_BYTES = (2 ** 64) // 8 def __init__(self, initialization_vector, tag=None, min_tag_length=16): # len(initialization_vector) must in [1, 2 ** 64), but it's impossible # to actually construct a bytes object that large, so we don't check # for it if not isinstance(initialization_vector, bytes): 
raise TypeError("initialization_vector must be bytes") self._initialization_vector = initialization_vector if tag is not None: if not isinstance(tag, bytes): raise TypeError("tag must be bytes or None") if min_tag_length < 4: raise ValueError("min_tag_length must be >= 4") if len(tag) < min_tag_length: raise ValueError( "Authentication tag must be {0} bytes or longer.".format( min_tag_length) ) self._tag = tag tag = utils.read_only_property("_tag") initialization_vector = utils.read_only_property("_initialization_vector") def validate_for_algorithm(self, algorithm): _check_aes_key_length(self, algorithm)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_117_3
crossvul-python_data_bad_5568_1
# packet.py # # Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net> # # A RADIUS packet as defined in RFC 2138 import struct import random try: import hashlib md5_constructor = hashlib.md5 except ImportError: # BBB for python 2.4 import md5 md5_constructor = md5.new import six from pyrad import tools # Packet codes AccessRequest = 1 AccessAccept = 2 AccessReject = 3 AccountingRequest = 4 AccountingResponse = 5 AccessChallenge = 11 StatusServer = 12 StatusClient = 13 DisconnectRequest = 40 DisconnectACK = 41 DisconnectNAK = 42 CoARequest = 43 CoAACK = 44 CoANAK = 45 # Current ID CurrentID = random.randrange(1, 255) class PacketError(Exception): pass class Packet(dict): """Packet acts like a standard python map to provide simple access to the RADIUS attributes. Since RADIUS allows for repeated attributes the value will always be a sequence. pyrad makes sure to preserve the ordering when encoding and decoding packets. There are two ways to use the map intereface: if attribute names are used pyrad take care of en-/decoding data. If the attribute type number (or a vendor ID/attribute type tuple for vendor attributes) is used you work with the raw data. Normally you will not use this class directly, but one of the :obj:`AuthPacket` or :obj:`AcctPacket` classes. 
""" def __init__(self, code=0, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ dict.__init__(self) self.code = code if id is not None: self.id = id else: self.id = CreateID() if not isinstance(secret, six.binary_type): raise TypeError('secret must be a binary string') self.secret = secret if authenticator is not None and \ not isinstance(authenticator, six.binary_type): raise TypeError('authenticator must be a binary string') self.authenticator = authenticator if 'dict' in attributes: self.dict = attributes['dict'] if 'packet' in attributes: self.DecodePacket(attributes['packet']) for (key, value) in attributes.items(): if key in ['dict', 'fd', 'packet']: continue key = key.replace('_', '-') self.AddAttribute(key, value) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. 
""" return Packet(id=self.id, secret=self.secret, authenticator=self.authenticator, dict=self.dict, **attributes) def _DecodeValue(self, attr, value): if attr.values.HasBackward(value): return attr.values.GetBackward(value) else: return tools.DecodeAttr(attr.type, value) def _EncodeValue(self, attr, value): if attr.values.HasForward(value): return attr.values.GetForward(value) else: return tools.EncodeAttr(attr.type, value) def _EncodeKeyValues(self, key, values): if not isinstance(key, str): return (key, values) attr = self.dict.attributes[key] if attr.vendor: key = (self.dict.vendors.GetForward(attr.vendor), attr.code) else: key = attr.code return (key, [self._EncodeValue(attr, v) for v in values]) def _EncodeKey(self, key): if not isinstance(key, str): return key attr = self.dict.attributes[key] if attr.vendor: return (self.dict.vendors.GetForward(attr.vendor), attr.code) else: return attr.code def _DecodeKey(self, key): """Turn a key into a string if possible""" if self.dict.attrindex.HasBackward(key): return self.dict.attrindex.GetBackward(key) return key def AddAttribute(self, key, value): """Add an attribute to the packet. 
:param key: attribute name or identification :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute """ (key, value) = self._EncodeKeyValues(key, [value]) value = value[0] self.setdefault(key, []).append(value) def __getitem__(self, key): if not isinstance(key, six.string_types): return dict.__getitem__(self, key) values = dict.__getitem__(self, self._EncodeKey(key)) attr = self.dict.attributes[key] res = [] for v in values: res.append(self._DecodeValue(attr, v)) return res def __contains__(self, key): try: return dict.__contains__(self, self._EncodeKey(key)) except KeyError: return False has_key = __contains__ def __delitem__(self, key): dict.__delitem__(self, self._EncodeKey(key)) def __setitem__(self, key, item): if isinstance(key, six.string_types): (key, item) = self._EncodeKeyValues(key, [item]) dict.__setitem__(self, key, item) else: assert isinstance(item, list) dict.__setitem__(self, key, item) def keys(self): return [self._DecodeKey(key) for key in dict.keys(self)] @staticmethod def CreateAuthenticator(): """Create a packet autenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string """ data = [] for i in range(16): data.append(random.randrange(0, 256)) if six.PY3: return bytes(data) else: return ''.join(chr(b) for b in data) def CreateID(self): """Create a packet ID. All RADIUS requests have a ID which is used to identify a request. This is used to detect retries and replay attacks. This function returns a suitable random number that can be used as ID. :return: ID number :rtype: integer """ return random.randrange(0, 256) def ReplyPacket(self): """Create a ready-to-transmit authentication reply packet. 
Returns a RADIUS packet which can be directly transmitted to a RADIUS server. This differs with Packet() in how the authenticator is calculated. :return: raw packet :rtype: string """ assert(self.authenticator) assert(self.secret) attr = self._PktEncodeAttributes() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) authenticator = md5_constructor(header[0:4] + self.authenticator + attr + self.secret).digest() return header + authenticator + attr def VerifyReply(self, reply, rawreply=None): if reply.id != self.id: return False if rawreply is None: rawreply = reply.ReplyPacket() hash = md5_constructor(rawreply[0:4] + self.authenticator + rawreply[20:] + self.secret).digest() if hash != rawreply[4:20]: return False return True def _PktEncodeAttribute(self, key, value): if isinstance(key, tuple): value = struct.pack('!L', key[0]) + \ self._PktEncodeAttribute(key[1], value) key = 26 return struct.pack('!BB', key, (len(value) + 2)) + value def _PktEncodeAttributes(self): result = six.b('') for (code, datalst) in self.items(): for data in datalst: result += self._PktEncodeAttribute(code, data) return result def _PktDecodeVendorAttribute(self, data): # Check if this packet is long enough to be in the # RFC2865 recommended form if len(data) < 6: return (26, data) (vendor, type, length) = struct.unpack('!LBB', data[:6])[0:3] # Another sanity check if len(data) != length + 4: return (26, data) return ((vendor, type), data[6:]) def DecodePacket(self, packet): """Initialize the object from raw packet data. Decode a packet as received from the network and decode it. 
:param packet: raw packet :type packet: string""" try: (self.code, self.id, length, self.authenticator) = \ struct.unpack('!BBH16s', packet[0:20]) except struct.error: raise PacketError('Packet header is corrupt') if len(packet) != length: raise PacketError('Packet has invalid length') if length > 8192: raise PacketError('Packet length is too long (%d)' % length) self.clear() packet = packet[20:] while packet: try: (key, attrlen) = struct.unpack('!BB', packet[0:2]) except struct.error: raise PacketError('Attribute header is corrupt') if attrlen < 2: raise PacketError( 'Attribute length is too small (%d)' % attrlen) value = packet[2:attrlen] if key == 26: (key, value) = self._PktDecodeVendorAttribute(value) self.setdefault(key, []).append(value) packet = packet[attrlen:] class AuthPacket(Packet): def __init__(self, code=AccessRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param code: packet type code :type code: integer (8bits) :param id: packet identifaction number :type id: integer (8 bits) :param secret: secret needed to communicate with a RADIUS server :type secret: string :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AuthPacket(AccessAccept, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. 
:return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH16s', self.code, self.id, (20 + len(attr)), self.authenticator) return header + attr def PwDecrypt(self, password): """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string """ buf = password pw = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): pw += bytes((hash[i] ^ buf[i],)) else: for i in range(16): pw += chr(ord(hash[i]) ^ ord(buf[i])) (last, buf) = (buf[:16], buf[16:]) while pw.endswith(six.b('\x00')): pw = pw[:-1] return pw.decode('utf-8') def PwCrypt(self, password): """Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. 
:param password: plaintext password :type password: unicode stringn :return: obfuscated version of the password :rtype: binary string """ if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if isinstance(password, six.text_type): password = password.encode('utf-8') buf = password if len(password) % 16 != 0: buf += six.b('\x00') * (16 - (len(password) % 16)) hash = md5_constructor(self.secret + self.authenticator).digest() result = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): result += bytes((hash[i] ^ buf[i],)) else: for i in range(16): result += chr(ord(hash[i]) ^ ord(buf[i])) last = result[-16:] buf = buf[16:] return result class AcctPacket(Packet): """RADIUS accounting packets. This class is a specialization of the generic :obj:`Packet` class for accounting packets. """ def __init__(self, code=AccountingRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) if 'packet' in attributes: self.raw_packet = attributes['packet'] def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AcctPacket(AccountingResponse, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def VerifyAcctRequest(self): """Verify request authenticator. 
:return: True if verification failed else False :rtype: boolean """ assert(self.raw_packet) hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') + self.raw_packet[20:] + self.secret).digest() return hash == self.authenticator def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. :return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00') + attr + self.secret).digest() return header + self.authenticator + attr def CreateID(): """Generate a packet ID. :return: packet ID :rtype: 8 bit integer """ global CurrentID CurrentID = (CurrentID + 1) % 256 return CurrentID
./CrossVul/dataset_final_sorted/CWE-20/py/bad_5568_1
crossvul-python_data_bad_100_1
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from synapse.api.errors import SynapseError from synapse.crypto.event_signing import check_event_content_hash from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_request from synapse.util import unwrapFirstError, logcontext from twisted.internet import defer logger = logging.getLogger(__name__) class FederationBase(object): def __init__(self, hs): self.hs = hs self.server_name = hs.hostname self.keyring = hs.get_keyring() self.spam_checker = hs.get_spam_checker() self.store = hs.get_datastore() self._clock = hs.get_clock() @defer.inlineCallbacks def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False, include_none=False): """Takes a list of PDUs and checks the signatures and hashs of each one. If a PDU fails its signature check then we check if we have it in the database and if not then request if from the originating server of that PDU. If a PDU fails its content hash check then it is redacted. The given list of PDUs are not modified, instead the function returns a new list. Args: pdu (list) outlier (bool) Returns: Deferred : A list of PDUs that have valid signatures and hashes. 
""" deferreds = self._check_sigs_and_hashes(pdus) @defer.inlineCallbacks def handle_check_result(pdu, deferred): try: res = yield logcontext.make_deferred_yieldable(deferred) except SynapseError: res = None if not res: # Check local db. res = yield self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True, ) if not res and pdu.origin != origin: try: res = yield self.get_pdu( destinations=[pdu.origin], event_id=pdu.event_id, outlier=outlier, timeout=10000, ) except SynapseError: pass if not res: logger.warn( "Failed to find copy of %s with valid signature", pdu.event_id, ) defer.returnValue(res) handle = logcontext.preserve_fn(handle_check_result) deferreds2 = [ handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds) ] valid_pdus = yield logcontext.make_deferred_yieldable( defer.gatherResults( deferreds2, consumeErrors=True, ) ).addErrback(unwrapFirstError) if include_none: defer.returnValue(valid_pdus) else: defer.returnValue([p for p in valid_pdus if p]) def _check_sigs_and_hash(self, pdu): return logcontext.make_deferred_yieldable( self._check_sigs_and_hashes([pdu])[0], ) def _check_sigs_and_hashes(self, pdus): """Checks that each of the received events is correctly signed by the sending server. Args: pdus (list[FrozenEvent]): the events to be checked Returns: list[Deferred]: for each input event, a deferred which: * returns the original event if the checks pass * returns a redacted version of the event (if the signature matched but the hash did not) * throws a SynapseError if the signature check failed. The deferreds run their callbacks in the sentinel logcontext. 
""" redacted_pdus = [ prune_event(pdu) for pdu in pdus ] deferreds = self.keyring.verify_json_objects_for_server([ (p.origin, p.get_pdu_json()) for p in redacted_pdus ]) ctx = logcontext.LoggingContext.current_context() def callback(_, pdu, redacted): with logcontext.PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): logger.warn( "Event content has been tampered, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) return redacted if self.spam_checker.check_event_for_spam(pdu): logger.warn( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) return redacted return pdu def errback(failure, pdu): failure.trap(SynapseError) with logcontext.PreserveLoggingContext(ctx): logger.warn( "Signature check failed for %s", pdu.event_id, ) return failure for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus): deferred.addCallbacks( callback, errback, callbackArgs=[pdu, redacted], errbackArgs=[pdu], ) return deferreds def event_from_pdu_json(pdu_json, outlier=False): """Construct a FrozenEvent from an event json received over federation Args: pdu_json (object): pdu as received over federation outlier (bool): True to mark this event as an outlier Returns: FrozenEvent Raises: SynapseError: if the pdu is missing required fields """ # we could probably enforce a bunch of other fields here (room_id, sender, # origin, etc etc) assert_params_in_request(pdu_json, ('event_id', 'type')) event = FrozenEvent( pdu_json ) event.internal_metadata.outlier = outlier return event
./CrossVul/dataset_final_sorted/CWE-20/py/bad_100_1
crossvul-python_data_bad_2141_2
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible import utils class ReturnData(object): ''' internal return class for runner execute methods, not part of public API signature ''' __slots__ = [ 'result', 'comm_ok', 'host', 'diff' ] def __init__(self, conn=None, host=None, result=None, comm_ok=True, diff=dict()): # which host is this ReturnData about? if conn is not None: self.host = conn.host delegate = getattr(conn, 'delegate', None) if delegate is not None: self.host = delegate else: self.host = host self.result = result self.comm_ok = comm_ok # if these values are set and used with --diff we can show # changes made to particular files self.diff = diff if type(self.result) in [ str, unicode ]: self.result = utils.parse_json(self.result) if self.host is None: raise Exception("host not set") if type(self.result) != dict: raise Exception("dictionary result expected") def communicated_ok(self): return self.comm_ok def is_successful(self): return self.comm_ok and (self.result.get('failed', False) == False) and ('failed_when_result' in self.result and [not self.result['failed_when_result']] or [self.result.get('rc',0) == 0])[0]
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2141_2
crossvul-python_data_bad_3766_0
from django.conf.urls import patterns, url from django.contrib.auth import context_processors from django.contrib.auth.urls import urlpatterns from django.contrib.auth.views import password_reset from django.contrib.auth.decorators import login_required from django.contrib.messages.api import info from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import Template, RequestContext from django.views.decorators.cache import never_cache @never_cache def remote_user_auth_view(request): "Dummy view for remote user tests" t = Template("Username is {{ user }}.") c = RequestContext(request, {}) return HttpResponse(t.render(c)) def auth_processor_no_attr_access(request): r1 = render_to_response('context_processors/auth_attrs_no_access.html', RequestContext(request, {}, processors=[context_processors.auth])) # *After* rendering, we check whether the session was accessed return render_to_response('context_processors/auth_attrs_test_access.html', {'session_accessed':request.session.accessed}) def auth_processor_attr_access(request): r1 = render_to_response('context_processors/auth_attrs_access.html', RequestContext(request, {}, processors=[context_processors.auth])) return render_to_response('context_processors/auth_attrs_test_access.html', {'session_accessed':request.session.accessed}) def auth_processor_user(request): return render_to_response('context_processors/auth_attrs_user.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_perms(request): return render_to_response('context_processors/auth_attrs_perms.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_messages(request): info(request, "Message 1") return render_to_response('context_processors/auth_attrs_messages.html', RequestContext(request, {}, processors=[context_processors.auth])) def userpage(request): pass # special urls for auth test cases urlpatterns = urlpatterns + patterns('', 
(r'^logout/custom_query/$', 'django.contrib.auth.views.logout', dict(redirect_field_name='follow')), (r'^logout/next_page/$', 'django.contrib.auth.views.logout', dict(next_page='/somewhere/')), (r'^remote_user/$', remote_user_auth_view), (r'^password_reset_from_email/$', 'django.contrib.auth.views.password_reset', dict(from_email='staffmember@example.com')), (r'^login_required/$', login_required(password_reset)), (r'^login_required_login_url/$', login_required(password_reset, login_url='/somewhere/')), (r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access), (r'^auth_processor_attr_access/$', auth_processor_attr_access), (r'^auth_processor_user/$', auth_processor_user), (r'^auth_processor_perms/$', auth_processor_perms), (r'^auth_processor_messages/$', auth_processor_messages), url(r'^userpage/(.+)/$', userpage, name="userpage"), )
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3766_0
crossvul-python_data_bad_3723_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Forms used for Horizon's auth mechanisms. """ import logging from django import shortcuts from django.conf import settings from django.contrib import messages from django.contrib.auth import REDIRECT_FIELD_NAME from django.utils.translation import ugettext as _ from keystoneclient import exceptions as keystone_exceptions from horizon import api from horizon import base from horizon import exceptions from horizon import forms from horizon import users LOG = logging.getLogger(__name__) def _set_session_data(request, token): request.session['serviceCatalog'] = token.serviceCatalog request.session['tenant'] = token.tenant['name'] request.session['tenant_id'] = token.tenant['id'] request.session['token'] = token.id request.session['user_name'] = token.user['name'] request.session['user_id'] = token.user['id'] request.session['roles'] = token.user['roles'] class Login(forms.SelfHandlingForm): """ Form used for logging in a user. Handles authentication with Keystone, choosing a tenant, and fetching a scoped token token for that tenant. Redirects to the URL returned by :meth:`horizon.get_user_home` if successful. Subclass of :class:`~horizon.forms.SelfHandlingForm`. 
""" region = forms.ChoiceField(label=_("Region"), required=False) username = forms.CharField(label=_("User Name")) password = forms.CharField(label=_("Password"), widget=forms.PasswordInput(render_value=False)) def __init__(self, *args, **kwargs): super(Login, self).__init__(*args, **kwargs) # FIXME(gabriel): When we switch to region-only settings, we can # remove this default region business. default_region = (settings.OPENSTACK_KEYSTONE_URL, "Default Region") regions = getattr(settings, 'AVAILABLE_REGIONS', [default_region]) self.fields['region'].choices = regions if len(regions) == 1: self.fields['region'].initial = default_region[0] self.fields['region'].widget = forms.widgets.HiddenInput() def handle(self, request, data): if 'user_name' in request.session: if request.session['user_name'] != data['username']: # To avoid reusing another user's session, create a # new, empty session if the existing session # corresponds to a different authenticated user. request.session.flush() # Always cycle the session key when viewing the login form to # prevent session fixation request.session.cycle_key() # For now we'll allow fallback to OPENSTACK_KEYSTONE_URL if the # form post doesn't include a region. 
endpoint = data.get('region', None) or settings.OPENSTACK_KEYSTONE_URL region_name = dict(self.fields['region'].choices)[endpoint] request.session['region_endpoint'] = endpoint request.session['region_name'] = region_name redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, "") if data.get('tenant', None): try: token = api.token_create(request, data.get('tenant'), data['username'], data['password']) tenants = api.tenant_list_for_token(request, token.id) except: msg = _('Unable to authenticate for that project.') exceptions.handle(request, message=msg, escalate=True) _set_session_data(request, token) user = users.get_user_from_request(request) redirect = redirect_to or base.Horizon.get_user_home(user) return shortcuts.redirect(redirect) elif data.get('username', None): try: unscoped_token = api.token_create(request, '', data['username'], data['password']) except keystone_exceptions.Unauthorized: exceptions.handle(request, _('Invalid user name or password.')) except: # If we get here we don't want to show a stack trace to the # user. However, if we fail here, there may be bad session # data that's been cached already. request.user_logout() exceptions.handle(request, message=_("An error occurred authenticating." " Please try again later."), escalate=True) # Unscoped token request.session['unscoped_token'] = unscoped_token.id request.user.username = data['username'] # Get the tenant list, and log in using first tenant # FIXME (anthony): add tenant chooser here? try: tenants = api.tenant_list_for_token(request, unscoped_token.id) except: exceptions.handle(request) tenants = [] # Abort if there are no valid tenants for this user if not tenants: messages.error(request, _('You are not authorized for any projects.') % {"user": data['username']}, extra_tags="login") return # Create a token. 
# NOTE(gabriel): Keystone can return tenants that you're # authorized to administer but not to log into as a user, so in # the case of an Unauthorized error we should iterate through # the tenants until one succeeds or we've failed them all. while tenants: tenant = tenants.pop() try: token = api.token_create_scoped(request, tenant.id, unscoped_token.id) break except: # This will continue for recognized Unauthorized # exceptions from keystoneclient. exceptions.handle(request, ignore=True) token = None if token is None: raise exceptions.NotAuthorized( _("You are not authorized for any available projects.")) _set_session_data(request, token) user = users.get_user_from_request(request) redirect = redirect_to or base.Horizon.get_user_home(user) return shortcuts.redirect(redirect) class LoginWithTenant(Login): """ Exactly like :class:`.Login` but includes the tenant id as a field so that the process of choosing a default tenant is bypassed. """ region = forms.ChoiceField(required=False) username = forms.CharField(max_length="20", widget=forms.TextInput(attrs={'readonly': 'readonly'})) tenant = forms.CharField(widget=forms.HiddenInput())
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3723_0
crossvul-python_data_good_3767_0
from django.conf.urls import patterns, url from django.contrib.auth import context_processors from django.contrib.auth.urls import urlpatterns from django.contrib.auth.views import password_reset from django.contrib.auth.decorators import login_required from django.contrib.messages.api import info from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import Template, RequestContext from django.views.decorators.cache import never_cache @never_cache def remote_user_auth_view(request): "Dummy view for remote user tests" t = Template("Username is {{ user }}.") c = RequestContext(request, {}) return HttpResponse(t.render(c)) def auth_processor_no_attr_access(request): r1 = render_to_response('context_processors/auth_attrs_no_access.html', RequestContext(request, {}, processors=[context_processors.auth])) # *After* rendering, we check whether the session was accessed return render_to_response('context_processors/auth_attrs_test_access.html', {'session_accessed':request.session.accessed}) def auth_processor_attr_access(request): r1 = render_to_response('context_processors/auth_attrs_access.html', RequestContext(request, {}, processors=[context_processors.auth])) return render_to_response('context_processors/auth_attrs_test_access.html', {'session_accessed':request.session.accessed}) def auth_processor_user(request): return render_to_response('context_processors/auth_attrs_user.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_perms(request): return render_to_response('context_processors/auth_attrs_perms.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_perm_in_perms(request): return render_to_response('context_processors/auth_attrs_perm_in_perms.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_messages(request): info(request, "Message 1") return 
render_to_response('context_processors/auth_attrs_messages.html', RequestContext(request, {}, processors=[context_processors.auth])) def userpage(request): pass # special urls for auth test cases urlpatterns = urlpatterns + patterns('', (r'^logout/custom_query/$', 'django.contrib.auth.views.logout', dict(redirect_field_name='follow')), (r'^logout/next_page/$', 'django.contrib.auth.views.logout', dict(next_page='/somewhere/')), (r'^remote_user/$', remote_user_auth_view), (r'^password_reset_from_email/$', 'django.contrib.auth.views.password_reset', dict(from_email='staffmember@example.com')), (r'^admin_password_reset/$', 'django.contrib.auth.views.password_reset', dict(is_admin_site=True)), (r'^login_required/$', login_required(password_reset)), (r'^login_required_login_url/$', login_required(password_reset, login_url='/somewhere/')), (r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access), (r'^auth_processor_attr_access/$', auth_processor_attr_access), (r'^auth_processor_user/$', auth_processor_user), (r'^auth_processor_perms/$', auth_processor_perms), (r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms), (r'^auth_processor_messages/$', auth_processor_messages), url(r'^userpage/(.+)/$', userpage, name="userpage"), )
./CrossVul/dataset_final_sorted/CWE-20/py/good_3767_0
crossvul-python_data_good_8_0
from importlib import import_module from os import path, listdir from string import lower from debug import logger import paths class MsgBase(object): def encode(self): self.data = {"": lower(type(self).__name__)} def constructObject(data): try: m = import_module("messagetypes." + data[""]) classBase = getattr(m, data[""].title()) except (NameError, ImportError): logger.error("Don't know how to handle message type: \"%s\"", data[""], exc_info=True) return None try: returnObj = classBase() returnObj.decode(data) except KeyError as e: logger.error("Missing mandatory key %s", e) return None except: logger.error("classBase fail", exc_info=True) return None else: return returnObj if paths.frozen is not None: import messagetypes.message import messagetypes.vote else: for mod in listdir(path.dirname(__file__)): if mod == "__init__.py": continue splitted = path.splitext(mod) if splitted[1] != ".py": continue try: import_module("." + splitted[0], "messagetypes") except ImportError: logger.error("Error importing %s", mod, exc_info=True) else: logger.debug("Imported message type module %s", mod)
./CrossVul/dataset_final_sorted/CWE-20/py/good_8_0
crossvul-python_data_good_3499_4
import os import pipes from subprocess import Popen, PIPE import Bcfg2.Server.Plugin # for debugging output only import logging logger = logging.getLogger('Bcfg2.Plugins.Svn') class Svn(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Version): """Svn is a version plugin for dealing with Bcfg2 repos.""" name = 'Svn' __version__ = '$Id$' __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) self.core = core self.datastore = datastore # path to svn directory for bcfg2 repo svn_dir = "%s/.svn" % datastore # Read revision from bcfg2 repo if os.path.isdir(svn_dir): self.get_revision() else: logger.error("%s is not a directory" % svn_dir) raise Bcfg2.Server.Plugin.PluginInitError logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir) def get_revision(self): """Read svn revision information for the Bcfg2 repository.""" try: data = Popen(("env LC_ALL=C svn info %s" % pipes.quote(self.datastore)), shell=True, stdout=PIPE).communicate()[0].split('\n') return [line.split(': ')[1] for line in data \ if line[:9] == 'Revision:'][-1] except IndexError: logger.error("Failed to read svn info; disabling svn support") logger.error('''Ran command "svn info %s"''' % (self.datastore)) logger.error("Got output: %s" % data) raise Bcfg2.Server.Plugin.PluginInitError
./CrossVul/dataset_final_sorted/CWE-20/py/good_3499_4
crossvul-python_data_bad_50_7
# -*- coding: utf-8 -*- # # (c) Cornelius Kölbel # License: AGPLv3 # contact: http://www.privacyidea.org # # This code is free software; you can redistribute it and/or # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE # License as published by the Free Software Foundation; either # version 3 of the License, or any later version. # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU AFFERO GENERAL PUBLIC LICENSE for more details. # # You should have received a copy of the GNU Affero General Public # License along with this program. If not, see <http://www.gnu.org/licenses/>. # import logging import functools from privacyidea.lib.error import TokenAdminError from privacyidea.lib.error import ParameterError from privacyidea.lib import _ log = logging.getLogger(__name__) def check_token_locked(func): """ Decorator to check if a token is locked or not. The decorator is to be used in token class methods. It can be used to avoid performing an action on a locked token. If the token is locked, a TokenAdminError is raised. """ @functools.wraps(func) def token_locked_wrapper(*args, **kwds): # The token object token = args[0] if token.is_locked(): raise TokenAdminError(_("This action is not possible, since the " "token is locked"), id=1007) f_result = func(*args, **kwds) return f_result return token_locked_wrapper def check_user_or_serial(func): """ Decorator to check user and serial at the beginning of a function The wrapper will check the parameters user and serial and verify that not both parameters are None. Otherwise it will throw an exception ParameterError. 
""" @functools.wraps(func) def user_or_serial_wrapper(*args, **kwds): # If there is no user and serial keyword parameter and if # there is no normal argument, we do not have enough information serial = kwds.get("serial") user = kwds.get("user") # We have no serial! The serial would be the first arg if (serial is None and (len(args) == 0 or args[0] is None) and (user is None or (user is not None and user.is_empty()))): # We either have an empty User object or None raise ParameterError(ParameterError.USER_OR_SERIAL) f_result = func(*args, **kwds) return f_result return user_or_serial_wrapper class check_user_or_serial_in_request(object): """ Decorator to check user and serial in a request. If the request does not contain a serial number (serial) or a user (user) it will throw a ParameterError. """ def __init__(self, request): self.request = request def __call__(self, func): @functools.wraps(func) def check_user_or_serial_in_request_wrapper(*args, **kwds): user = self.request.all_data.get("user") serial = self.request.all_data.get("serial") if not serial and not user: raise ParameterError(_("You need to specify a serial or a user.")) f_result = func(*args, **kwds) return f_result return check_user_or_serial_in_request_wrapper def check_copy_serials(func): """ Decorator to check if the serial_from and serial_to exist. If the serials are not unique, we raise an error """ from privacyidea.lib.token import get_tokens @functools.wraps(func) def check_serial_wrapper(*args, **kwds): tokenobject_list_from = get_tokens(serial=args[0]) tokenobject_list_to = get_tokens(serial=args[1]) if len(tokenobject_list_from) != 1: log.error("not a unique token to copy from found") raise(TokenAdminError("No unique token to copy from found", id=1016)) if len(tokenobject_list_to) != 1: log.error("not a unique token to copy to found") raise(TokenAdminError("No unique token to copy to found", id=1017)) f_result = func(*args, **kwds) return f_result return check_serial_wrapper
./CrossVul/dataset_final_sorted/CWE-20/py/bad_50_7
crossvul-python_data_good_50_5
# -*- coding: utf-8 -*- # # privacyIDEA documentation build configuration file, created by # sphinx-quickstart on Fri Jun 13 07:31:01 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.23.2' # The full version, including alpha/beta/rc tags. #release = '2.16dev5' release = version import sys import os from mock import Mock as MagicMock class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() #MOCK_MODULES = ['pandas', 'pyOpenSSL'] MOCK_MODULES = [] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # Monkey-patch functools.wraps # http://stackoverflow.com/questions/28366818/preserve-default-arguments-of-wrapped-decorated-python-function-in-sphinx-docume import functools def no_op_wraps(func): """Replaces functools.wraps in order to undo wrapping. Can be used to preserve the decorated function's signature in the documentation generated by Sphinx. """ def wrapper(decorator): return func return wrapper functools.wraps = no_op_wraps # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) sys.path.append(os.path.abspath('_themes/flask-sphinx-themes')) sys.path.insert(0, os.path.abspath('../privacyidea')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. 
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.imgmath',
              'sphinx.ext.viewcode',
              'sphinxcontrib.autohttp.flask']

# REST endpoints under these prefixes are excluded from the generated
# HTTP routing index (consumed by sphinxcontrib.autohttp).
http_index_ignore_prefixes = ['/token']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'privacyIDEA'
copyright = u'2014-2017, Cornelius Kölbel'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
#html_theme = 'sphinx_rtd_theme'
#html_theme = 'agogo'
html_theme = 'flask'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes/flask-sphinx-themes']

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/privacyidea-color.png"

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'privacyIDEAdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System',
     u'Cornelius Kölbel', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'privacyidea-server', u'privacyIDEA Authentication System',
     [u'Cornelius Kölbel'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE: title spelling fixed ("AUthentication" -> "Authentication") to match
# the LaTeX and man-page entries above.
# FIXME: the description below is still the stock Sphinx placeholder text.
texinfo_documents = [
    ('index', 'privacyIDEA', u'privacyIDEA Authentication System',
     u'Cornelius Kölbel', 'privacyIDEA', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
./CrossVul/dataset_final_sorted/CWE-20/py/good_50_5
crossvul-python_data_bad_3767_1
# Tests for the django.contrib.auth views (login, logout, password change,
# password reset). All tests run against the stub URLconf in
# django.contrib.auth.tests.urls with a fixed template/language environment
# supplied by the @override_settings decorator below.
import os
import re

from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.http import urlquote
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
    SetPasswordForm, PasswordResetForm)
from django.contrib.auth.tests.utils import skipIfCustomUser


@override_settings(
    LANGUAGES=(
        ('en', 'English'),
    ),
    LANGUAGE_CODE='en',
    TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(__file__), 'templates'),
    ),
    USE_TZ=False,
    # SHA1 hasher keeps fixture logins fast in tests.
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the following test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def login(self, password='password'):
        # Log the fixture user in and assert the redirect to
        # LOGIN_REDIRECT_URL plus an authenticated session.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
        self.assertTrue(SESSION_KEY in self.client.session)

    def assertContainsEscaped(self, response, text, **kwargs):
        # Form error messages are HTML-escaped when rendered, so escape the
        # expected text before looking for it in the response body.
        return self.assertContains(response, escape(force_text(text)), **kwargs)


@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
    # Uses the shipped contrib.auth URLconf (not the test stub) so the
    # canonical URL names are exercised.
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb36': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)


@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
    """Tests of the password reset request/confirm round trip."""

    def test_email_not_found(self):
        "Error is raised if the provided email address isn't currently registered"
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown'])
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Returns (full_url, path) of the reset link embedded in the email.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # It redirects us to a 'complete' page:
        self.assertEqual(response.status_code, 302)
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))

        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])


@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """Password reset against a swapped-in custom user model."""
    fixtures = ['custom_user.json']

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")


@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
    """Tests of the authenticated password-change view."""

    def fail_login(self, password='password'):
        # Assert that logging in with the given password now fails.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login'])

    def logout(self):
        response = self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))
        # Old password no longer works; new one does.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))

    def test_password_change_done_fails(self):
        with self.settings(LOGIN_URL='/login/'):
            response = self.client.get('/password_change/done/')
            self.assertEqual(response.status_code, 302)
            self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/'))


@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
    """Tests of the login view, including redirect-target validation."""

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('django.contrib.auth.views.login'))
        self.assertEqual(response.status_code, 200)
        if Site._meta.installed:
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                     'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        login_url = reverse('django.contrib.auth.views.login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):

            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        # NOTE(review): treating 'https:///' and '//testserver/' as safe
        # redirect targets looks permissive — later Django releases tightened
        # this validation (is_safe_url); verify against the views under test.
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                    'username': 'testclient',
                    'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)


@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
    """Tests of how LOGIN_URL variants shape login_required redirects."""

    def setUp(self):
        super(LoginURLSettings, self).setUp()
        self.old_LOGIN_URL = settings.LOGIN_URL

    def tearDown(self):
        super(LoginURLSettings, self).tearDown()
        settings.LOGIN_URL = self.old_LOGIN_URL

    def get_login_required_url(self, login_url):
        # Hit a @login_required view and return the redirect target.
        settings.LOGIN_URL = login_url
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        return response['Location']

    def test_standard_login_url(self):
        login_url = '/login/'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = '/login_required/'
        self.assertEqual(login_required_url, 'http://testserver%s?%s' %
                         (login_url, querystring.urlencode('/')))

    def test_remote_login_url(self):
        login_url = 'http://remote.example.com/login'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url,
                         '%s?%s' % (login_url, querystring.urlencode('/')))

    def test_https_login_url(self):
        login_url = 'https:///login/'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url,
                         '%s?%s' % (login_url, querystring.urlencode('/')))

    def test_login_url_with_querystring(self):
        login_url = '/login/?pretty=1'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('pretty=1', mutable=True)
        querystring['next'] = '/login_required/'
        self.assertEqual(login_required_url, 'http://testserver/login/?%s' %
                         querystring.urlencode('/'))

    def test_remote_login_url_with_next_querystring(self):
        login_url = 'http://remote.example.com/login/'
        login_required_url = self.get_login_required_url('%s?next=/default/' %
                                                         login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url, '%s?%s' % (login_url,
                                                        querystring.urlencode('/')))


@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
    """Tests of the logout view and its redirect handling."""

    def confirm_logged_out(self):
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))

        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))

        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        logout_url = reverse('django.contrib.auth.views.logout')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check
        # NOTE(review): same permissive 'https:///' / '//testserver/'
        # expectations as LoginTest.test_security_check — confirm against the
        # redirect validation actually used by the logout view.
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3767_1
crossvul-python_data_good_3659_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import crypto from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova.rpc import common as rpc_common from nova import quota from nova import utils from nova import volume FLAGS = flags.FLAGS flags.DECLARE('dhcp_domain', 'nova.network.manager') LOG = logging.getLogger(__name__) def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so 
check for legal # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() key = {} key['user_id'] = user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'private_key': private_key, 'fingerprint': fingerprint} # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.REBUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETE: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.SHUTOFF: inst_state.SHUTOFF, vm_states.MIGRATING: inst_state.MIGRATE, vm_states.RESIZING: inst_state.RESIZE, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, } def _state_description(vm_state, shutdown_terminate): """Map the vm state to the server status string""" if (vm_state == vm_states.SHUTOFF and not shutdown_terminate): name = inst_state.STOPPED else: name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) return {'code': inst_state.name_to_code(name), 'name': name} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDevicedMapping.<N>.DeviceName BlockDevicedMapping.<N>.Ebs.SnapshotId BlockDevicedMapping.<N>.Ebs.VolumeSize BlockDevicedMapping.<N>.Ebs.DeleteOnTermination BlockDevicedMapping.<N>.Ebs.NoDevice BlockDevicedMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: 
ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? 
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. """ def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.sgh = utils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = 
{'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = 
snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): if not re.match('^[a-zA-Z0-9_\- ]+$', str(key_name)): err = _("Value (%s) for KeyName is invalid." " Content limited to Alphanumeric character, " "spaces, dashes, and underscore.") % key_name raise exception.EC2APIError(err) if len(str(key_name)) > 255: err = _("Value (%s) for Keyname is invalid." 
" Length exceeds maximum of 255.") % key_name raise exception.EC2APIError(err) LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) try: db.key_pair_get(context, context.user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass public_key = base64.b64decode(public_key_material) fingerprint = crypto.generate_fingerprint(public_key) key = {} key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'keyName': key_name, 'keyFingerprint': fingerprint} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return {'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, 
group):
        # Render one security group as an EC2 securityGroupInfo item,
        # expanding each DB rule into ipPermissions entries.
        g = {}
        g['groupDescription'] = group.description
        g['groupName'] = group.name
        g['ownerId'] = group.project_id
        g['ipPermissions'] = []
        for rule in group.rules:
            r = {}
            r['groups'] = []
            r['ipRanges'] = []
            if rule.group_id:
                # Source-group rule: reference the granting group.
                source_group = db.security_group_get(context, rule.group_id)
                r['groups'] += [{'groupName': source_group.name,
                                 'userId': source_group.project_id}]
                if rule.protocol:
                    r['ipProtocol'] = rule.protocol
                    r['fromPort'] = rule.from_port
                    r['toPort'] = rule.to_port
                    g['ipPermissions'] += [dict(r)]
                else:
                    # A protocol-less group rule means "all traffic";
                    # expand it to one entry per protocol for EC2 output.
                    for protocol, min_port, max_port in (('icmp', -1, -1),
                                                         ('tcp', 1, 65535),
                                                         ('udp', 1, 65535)):
                        r['ipProtocol'] = protocol
                        r['fromPort'] = min_port
                        r['toPort'] = max_port
                        g['ipPermissions'] += [dict(r)]
            else:
                # CIDR-based rule.
                r['ipProtocol'] = rule.protocol
                r['fromPort'] = rule.from_port
                r['toPort'] = rule.to_port
                r['ipRanges'] += [{'cidrIp': rule.cidr}]
                g['ipPermissions'] += [r]
        return g

    def _rule_args_to_dict(self, context, kwargs):
        """Expand raw EC2 rule arguments into a list of rule dicts
        suitable for the DB layer (one dict per cidr/group source)."""
        rules = []
        if not 'groups' in kwargs and not 'ip_ranges' in kwargs:
            # Flat argument form: a single rule at most.
            rule = self._rule_dict_last_step(context, **kwargs)
            if rule:
                rules.append(rule)
            return rules
        if 'ip_ranges' in kwargs:
            rules = self._cidr_args_split(kwargs)
        else:
            rules = [kwargs]
        finalset = []
        for rule in rules:
            if 'groups' in rule:
                groups_values = self._groups_args_split(rule)
                for groups_value in groups_values:
                    final = self._rule_dict_last_step(context, **groups_value)
                    finalset.append(final)
            else:
                final = self._rule_dict_last_step(context, **rule)
                finalset.append(final)
        return finalset

    def _cidr_args_split(self, kwargs):
        """Fan out a rule spec with multiple ip_ranges into one kwargs
        copy per CIDR (each carrying a single 'cidr_ip')."""
        cidr_args_split = []
        cidrs = kwargs['ip_ranges']
        for key, cidr in cidrs.iteritems():
            mykwargs = kwargs.copy()
            del mykwargs['ip_ranges']
            mykwargs['cidr_ip'] = cidr['cidr_ip']
            cidr_args_split.append(mykwargs)
        return cidr_args_split

    def _groups_args_split(self, kwargs):
        """Fan out a rule spec with multiple source groups into one
        kwargs copy per group, mapping EC2 field names to internal ones."""
        groups_args_split = []
        groups = kwargs['groups']
        for key, group in groups.iteritems():
            mykwargs = kwargs.copy()
            del mykwargs['groups']
            if 'group_name' in group:
                mykwargs['source_security_group_name'] = group['group_name']
            if 'user_id' in group:
                mykwargs['source_security_group_owner_id'] = group['user_id']
            if 'group_id' in group:
                mykwargs['source_security_group_id'] = group['group_id']
            groups_args_split.append(mykwargs)
        return groups_args_split

    def _rule_dict_last_step(self, context, to_port=None, from_port=None,
                             ip_protocol=None, cidr_ip=None, user_id=None,
                             source_security_group_name=None,
                             source_security_group_owner_id=None):
        # Validate and normalize one rule's source (group or CIDR),
        # protocol and port range.  Returns a values dict for the DB,
        # or None when a CIDR rule lacks protocol/ports.
        values = {}

        if source_security_group_name:
            source_project_id = self._get_source_project_id(context,
                source_security_group_owner_id)

            # NOTE(review): the lookup runs with elevated context against a
            # project id derived from the caller-supplied owner id; confirm
            # cross-project group references are intended here.
            source_security_group = db.security_group_get_by_name(
                    context.elevated(),
                    source_project_id,
                    source_security_group_name)
            notfound = exception.SecurityGroupNotFound
            if not source_security_group:
                raise notfound(security_group_id=source_security_group_name)
            values['group_id'] = source_security_group['id']
        elif cidr_ip:
            # If this fails, it throws an exception. This is what we want.
            cidr_ip = urllib.unquote(cidr_ip).decode()

            if not utils.is_valid_cidr(cidr_ip):
                # Raise exception for non-valid address
                raise exception.EC2APIError(_("Invalid CIDR"))

            values['cidr'] = cidr_ip
        else:
            # No source given at all: default to allowing from anywhere.
            values['cidr'] = '0.0.0.0/0'

        if source_security_group_name:
            # Open everything if an explicit port range or type/code are not
            # specified, but only if a source group was specified.
            ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
            if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
                from_port = -1
                to_port = -1
            elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
                  and to_port is None):
                from_port = 1
                to_port = 65535

        if ip_protocol and from_port is not None and to_port is not None:

            ip_protocol = str(ip_protocol)
            try:
                # Verify integer conversions
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                         " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                          "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port > to_port)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                            " be greater than the later")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                                           " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol.lower()
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def _security_group_rule_exists(self, security_group, values):
        """Indicates whether the specified rule values are already
           defined in the given security group.
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) rule_ids.append(rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. 
# Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) allowed = quota.allowed_security_group_rules(context, security_group['id'], 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exception.EC2APIError(msg) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) 
self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exception.EC2APIError(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id 
        validate_ec2_id(ec2_id)
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
        instance = self.compute_api.get(context, instance_id)
        output = self.compute_api.get_console_output(context, instance)
        now = utils.utcnow()
        return {"InstanceId": ec2_id,
                "Timestamp": now,
                "output": base64.b64encode(output)}

    def describe_volumes(self, context, volume_id=None, **kwargs):
        """List volumes, optionally filtered by a list of EC2 volume ids,
        formatted for the EC2 DescribeVolumes response."""
        if volume_id:
            volumes = []
            for ec2_id in volume_id:
                validate_ec2_id(ec2_id)
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                volume = self.volume_api.get(context, internal_id)
                volumes.append(volume)
        else:
            volumes = self.volume_api.get_all(context)
        volumes = [self._format_volume(context, v) for v in volumes]
        return {'volumeSet': volumes}

    def _format_volume(self, context, volume):
        # Render one volume dict in EC2 format; admins get extra detail
        # packed into the status string.
        instance_ec2_id = None
        instance_data = None
        if volume.get('instance', None):
            instance_id = volume['instance']['id']
            instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
            instance_data = '%s[%s]' % (instance_ec2_id,
                                        volume['instance']['host'])
        v = {}
        v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
        v['status'] = volume['status']
        v['size'] = volume['size']
        v['availabilityZone'] = volume['availability_zone']
        v['createTime'] = volume['created_at']
        if context.is_admin:
            v['status'] = '%s (%s, %s, %s, %s)' % (
                volume['status'],
                volume['project_id'],
                volume['host'],
                instance_data,
                volume['mountpoint'])
        if volume['attach_status'] == 'attached':
            v['attachmentSet'] = [{'attachTime': volume['attach_time'],
                                   'deleteOnTermination': False,
                                   'device': volume['mountpoint'],
                                   'instanceId': instance_ec2_id,
                                   'status': 'attached',
                                   'volumeId': v['volumeId']}]
        else:
            v['attachmentSet'] = [{}]
        if volume.get('snapshot_id') is not None:
            v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
        else:
            v['snapshotId'] = None

        return v

    def create_volume(self, context, **kwargs):
        """Create a volume of the requested size, optionally from a
        snapshot, and return it in EC2 format."""
        size = kwargs.get('size')
        if kwargs.get('snapshot_id') is not None:
            snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id'])
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            LOG.audit(_("Create volume from snapshot %s"), snapshot_id,
                      context=context)
        else:
            snapshot = None
            LOG.audit(_("Create volume of %s GB"), size, context=context)

        availability_zone = kwargs.get('availability_zone', None)

        volume = self.volume_api.create(context,
                                        size,
                                        None,
                                        None,
                                        snapshot,
                                        availability_zone=availability_zone)
        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        return self._format_volume(context, dict(volume))

    def delete_volume(self, context, volume_id, **kwargs):
        """Delete the volume identified by the EC2 volume id."""
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        try:
            volume = self.volume_api.get(context, volume_id)
            self.volume_api.delete(context, volume)
        except exception.InvalidVolume:
            raise exception.EC2APIError(_('Delete Failed'))

        return True

    def attach_volume(self, context,
                      volume_id,
                      instance_id,
                      device, **kwargs):
        """Attach a volume to an instance at the given device path and
        return the EC2 attachment description."""
        validate_ec2_id(instance_id)
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        instance = self.compute_api.get(context, instance_id)
        msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
                " at %(device)s") % locals()
        LOG.audit(msg, context=context)
        try:
            self.compute_api.attach_volume(context, instance,
                                           volume_id, device)
        except exception.InvalidVolume:
            raise exception.EC2APIError(_('Attach Failed.'))

        volume = self.volume_api.get(context, volume_id)
        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_id(instance_id),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}

    def detach_volume(self, context, volume_id, **kwargs):
        """Detach a volume from its instance and return the EC2
        attachment description."""
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        LOG.audit(_("Detach volume %s"), volume_id, context=context)
        volume = self.volume_api.get(context, volume_id)
        try:
            instance = self.compute_api.detach_volume(context,
                                                      volume_id=volume_id)
        except exception.InvalidVolume:
            raise exception.EC2APIError(_('Detach Volume Failed.'))

        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_id(instance['id']),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}

    def _format_kernel_id(self, context, instance_ref, result, key):
        # Populate result[key] with the EC2 'aki-' id; no-op when the
        # instance has no kernel image.
        kernel_uuid = instance_ref['kernel_id']
        if kernel_uuid is None or kernel_uuid == '':
            return
        result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')

    def _format_ramdisk_id(self, context, instance_ref, result, key):
        # Populate result[key] with the EC2 'ari-' id; no-op when the
        # instance has no ramdisk image.
        ramdisk_uuid = instance_ref['ramdisk_id']
        if ramdisk_uuid is None or ramdisk_uuid == '':
            return
        result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
                                                   'ari')

    def describe_instance_attribute(self, context, instance_id, attribute,
                                    **kwargs):
        """Return a single attribute of an instance, dispatched through a
        table of per-attribute formatter closures."""
        def _unsupported_attribute(instance, result):
            raise exception.EC2APIError(_('attribute not supported: %s') %
                                        attribute)

        def _format_attr_block_device_mapping(instance, result):
            tmp = {}
            self._format_instance_root_device_name(instance, tmp)
            self._format_instance_bdm(context, instance_id,
                                      tmp['rootDeviceName'], result)

        def _format_attr_disable_api_termination(instance, result):
            result['disableApiTermination'] = instance['disable_terminate']

        def _format_attr_group_set(instance, result):
            CloudController._format_group_set(instance, result)

        def _format_attr_instance_initiated_shutdown_behavior(instance,
                                                               result):
            if instance['shutdown_terminate']:
                result['instanceInitiatedShutdownBehavior'] = 'terminate'
            else:
                result['instanceInitiatedShutdownBehavior'] = 'stop'

        def _format_attr_instance_type(instance, result):
            self._format_instance_type(instance, result)

        def _format_attr_kernel(instance, result):
            self._format_kernel_id(context, instance, result, 'kernel')

        def _format_attr_ramdisk(instance, result):
            self._format_ramdisk_id(context, instance, result, 'ramdisk')

        def _format_attr_root_device_name(instance,
result):
            self._format_instance_root_device_name(instance, result)

        def _format_attr_source_dest_check(instance, result):
            _unsupported_attribute(instance, result)

        def _format_attr_user_data(instance, result):
            result['userData'] = base64.b64decode(instance['user_data'])

        # Dispatch table: EC2 attribute name -> formatter closure.
        attribute_formatter = {
            'blockDeviceMapping': _format_attr_block_device_mapping,
            'disableApiTermination': _format_attr_disable_api_termination,
            'groupSet': _format_attr_group_set,
            'instanceInitiatedShutdownBehavior':
            _format_attr_instance_initiated_shutdown_behavior,
            'instanceType': _format_attr_instance_type,
            'kernel': _format_attr_kernel,
            'ramdisk': _format_attr_ramdisk,
            'rootDeviceName': _format_attr_root_device_name,
            'sourceDestCheck': _format_attr_source_dest_check,
            'userData': _format_attr_user_data,
            }

        fn = attribute_formatter.get(attribute)
        if fn is None:
            raise exception.EC2APIError(
                _('attribute not supported: %s') % attribute)

        ec2_instance_id = instance_id
        validate_ec2_id(instance_id)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance = self.compute_api.get(context, instance_id)
        result = {'instance_id': ec2_instance_id}
        fn(instance, result)
        return result

    def describe_instances(self, context, **kwargs):
        """EC2 DescribeInstances: list reservations/instances."""
        # Optional DescribeInstances argument
        instance_id = kwargs.get('instance_id', None)
        return self._format_describe_instances(context,
                instance_id=instance_id)

    def describe_instances_v6(self, context, **kwargs):
        """DescribeInstances variant that includes IPv6 DNS names."""
        # Optional DescribeInstancesV6 argument
        instance_id = kwargs.get('instance_id', None)
        return self._format_describe_instances(context,
                instance_id=instance_id, use_v6=True)

    def _format_describe_instances(self, context, **kwargs):
        return {'reservationSet': self._format_instances(context, **kwargs)}

    def _format_run_instances(self, context, reservation_id):
        # RunInstances creates exactly one reservation.
        i = self._format_instances(context, reservation_id=reservation_id)
        assert len(i) == 1
        return i[0]

    def _format_terminate_instances(self, context, instance_id,
                                    previous_states):
        # Build the TerminateInstances response: previous and current
        # state per instance; deleted instances report DELETED.
        instances_set = []
        for (ec2_id, previous_state) in zip(instance_id, previous_states):
            i = {}
            i['instanceId'] = ec2_id
            i['previousState'] = _state_description(previous_state['vm_state'],
                                        previous_state['shutdown_terminate'])
            try:
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                instance = self.compute_api.get(context, internal_id)
                i['shutdownState'] = _state_description(instance['vm_state'],
                                            instance['shutdown_terminate'])
            except exception.NotFound:
                i['shutdownState'] = _state_description(vm_states.DELETED,
                                                        True)
            instances_set.append(i)
        return {'instancesSet': instances_set}

    def _format_instance_bdm(self, context, instance_id, root_device_name,
                             result):
        """Format InstanceBlockDeviceMappingResponseItemType"""
        root_device_type = 'instance-store'
        mapping = []
        for bdm in db.block_device_mapping_get_all_by_instance(context,
                                                               instance_id):
            volume_id = bdm['volume_id']
            if (volume_id is None or bdm['no_device']):
                continue

            # A volume/snapshot backing the root device makes it EBS.
            if (bdm['device_name'] == root_device_name and
                (bdm['snapshot_id'] or bdm['volume_id'])):
                assert not bdm['virtual_name']
                root_device_type = 'ebs'

            vol = self.volume_api.get(context, volume_id)
            LOG.debug(_("vol = %s\n"), vol)
            # TODO(yamahata): volume attach time
            ebs = {'volumeId': volume_id,
                   'deleteOnTermination': bdm['delete_on_termination'],
                   'attachTime': vol['attach_time'] or '-',
                   'status': vol['status'], }
            res = {'deviceName': bdm['device_name'],
                   'ebs': ebs, }
            mapping.append(res)

        if mapping:
            result['blockDeviceMapping'] = mapping
            result['rootDeviceType'] = root_device_type

    @staticmethod
    def _format_instance_root_device_name(instance, result):
        result['rootDeviceName'] = (instance.get('root_device_name') or
                                    block_device.DEFAULT_ROOT_DEV_NAME)

    @staticmethod
    def _format_instance_type(instance, result):
        if instance['instance_type']:
            result['instanceType'] = instance['instance_type'].get('name')
        else:
            result['instanceType'] = None

    @staticmethod
    def _format_group_set(instance, result):
        # Collect the instance's security group names as a groupSet list.
        security_group_names = []
        if instance.get('security_groups'):
            for security_group in instance['security_groups']:
                security_group_names.append(security_group['name'])
        result['groupSet'] = utils.convert_to_list_dict(
            security_group_names, 'groupId')

    def _format_instances(self, context, instance_id=None, use_v6=False,
            **search_opts):
        # TODO(termie): this method is poorly named as its name does not imply
        #               that it will be making a variety of database calls
        #               rather than simply formatting a bunch of instances that
        #               were handed to it
        reservations = {}
        # NOTE(vish): instance_id is an optional list of ids to filter by
        if instance_id:
            instances = []
            for ec2_id in instance_id:
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                try:
                    instance = self.compute_api.get(context, internal_id)
                except exception.NotFound:
                    continue
                instances.append(instance)
        else:
            try:
                # always filter out deleted instances
                search_opts['deleted'] = False
                instances = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     sort_dir='asc')
            except exception.NotFound:
                instances = []
        for instance in instances:
            # Hide the VPN instance from non-admin users.
            if not context.is_admin:
                if instance['image_ref'] == str(FLAGS.vpn_image_id):
                    continue
            i = {}
            instance_id = instance['id']
            ec2_id = ec2utils.id_to_ec2_id(instance_id)
            i['instanceId'] = ec2_id
            image_uuid = instance['image_ref']
            i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
            self._format_kernel_id(context, instance, i, 'kernelId')
            self._format_ramdisk_id(context, instance, i, 'ramdiskId')
            i['instanceState'] = _state_description(
                instance['vm_state'], instance['shutdown_terminate'])

            fixed_ip = None
            floating_ip = None
            ip_info = ec2utils.get_ip_info_for_instance(context, instance)
            if ip_info['fixed_ips']:
                fixed_ip = ip_info['fixed_ips'][0]
            if ip_info['floating_ips']:
                floating_ip = ip_info['floating_ips'][0]
            if ip_info['fixed_ip6s']:
                i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
            if FLAGS.ec2_private_dns_show_ip:
                i['privateDnsName'] = fixed_ip
            else:
                i['privateDnsName'] = instance['hostname']
            i['privateIpAddress'] = fixed_ip
            i['publicDnsName'] = floating_ip
            i['ipAddress'] = floating_ip or fixed_ip
            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
            i['keyName'] = instance['key_name']

            if context.is_admin:
                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                    instance['project_id'],
                    instance['host'])
            i['productCodesSet'] = utils.convert_to_list_dict([],
                                                              'product_codes')
            self._format_instance_type(instance, i)
            i['launchTime'] = instance['created_at']
            i['amiLaunchIndex'] = instance['launch_index']
            self._format_instance_root_device_name(instance, i)
            self._format_instance_bdm(context, instance_id,
                                      i['rootDeviceName'], i)
            host = instance['host']
            services = db.service_get_all_by_host(context.elevated(), host)
            zone = ec2utils.get_availability_zone_by_host(services, host)
            i['placement'] = {'availabilityZone': zone}
            # Group instances by reservation id.
            if instance['reservation_id'] not in reservations:
                r = {}
                r['reservationId'] = instance['reservation_id']
                r['ownerId'] = instance['project_id']
                self._format_group_set(instance, r)
                r['instancesSet'] = []
                reservations[instance['reservation_id']] = r
            reservations[instance['reservation_id']]['instancesSet'].append(i)

        return list(reservations.values())

    def describe_addresses(self, context, **kwargs):
        """EC2 DescribeAddresses: list the project's floating IPs."""
        return self.format_addresses(context)

    def format_addresses(self, context):
        # Build the addressesSet list from the project's floating IPs,
        # resolving the attached instance's EC2 id when present.
        addresses = []
        floaters = self.network_api.get_floating_ips_by_project(context)
        for floating_ip_ref in floaters:
            if floating_ip_ref['project_id'] is None:
                continue
            address = floating_ip_ref['address']
            ec2_id = None
            if floating_ip_ref['fixed_ip_id']:
                fixed_id = floating_ip_ref['fixed_ip_id']
                fixed = self.network_api.get_fixed_ip(context, fixed_id)
                if fixed['instance_id'] is not None:
                    ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id'])
            address_rv = {'public_ip': address,
                          'instance_id': ec2_id}
            if context.is_admin:
                details = "%s (%s)" % (address_rv['instance_id'],
                                       floating_ip_ref['project_id'])
                address_rv['instance_id'] = details
            addresses.append(address_rv)
        return {'addressesSet': addresses}

    def allocate_address(self, context, **kwargs):
        """Allocate a floating IP to the project."""
        LOG.audit(_("Allocate address"), context=context)
        try:
            public_ip = self.network_api.allocate_floating_ip(context)
            return {'publicIp': public_ip}
        except rpc_common.RemoteError as ex:
            # NOTE(tr3buchet) - why does this block exist?
            if ex.exc_type == 'NoMoreFloatingIps':
                raise exception.NoMoreFloatingIps()
            else:
                raise

    def release_address(self, context, public_ip, **kwargs):
        """Release a floating IP back to the pool."""
        LOG.audit(_("Release address %s"), public_ip, context=context)
        self.network_api.release_floating_ip(context, address=public_ip)
        return {'return': "true"}

    def associate_address(self, context, instance_id, public_ip, **kwargs):
        """Associate a floating IP with an instance."""
        LOG.audit(_("Associate address %(public_ip)s to"
                    " instance %(instance_id)s") % locals(), context=context)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        instance = self.compute_api.get(context, instance_id)
        self.compute_api.associate_floating_ip(context,
                                               instance,
                                               address=public_ip)
        return {'return': "true"}

    def disassociate_address(self, context, public_ip, **kwargs):
        """Disassociate a floating IP from its instance."""
        LOG.audit(_("Disassociate address %s"), public_ip, context=context)
        self.network_api.disassociate_floating_ip(context, address=public_ip)
        return {'return': "true"}

    def run_instances(self, context, **kwargs):
        """EC2 RunInstances: boot instances from an image.

        Resolves kernel/ramdisk EC2 ids to glance ids, validates block
        device mappings and image availability, then delegates to the
        compute API and returns the new reservation.
        """
        max_count = int(kwargs.get('max_count', 1))
        if kwargs.get('kernel_id'):
            kernel = self._get_image(context, kwargs['kernel_id'])
            kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
                                                           kernel['id'])
        if kwargs.get('ramdisk_id'):
            ramdisk = self._get_image(context, kwargs['ramdisk_id'])
            kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
                                                            ramdisk['id'])
        for bdm in kwargs.get('block_device_mapping', []):
            _parse_block_device_mapping(bdm)

        image = self._get_image(context, kwargs['image_id'])
        image_uuid = ec2utils.id_to_glance_id(context, image['id'])

        if image:
            image_state = self._get_image_state(image)
        else:
            raise exception.ImageNotFound(image_id=kwargs['image_id'])

        if image_state != 'available':
            raise exception.EC2APIError(_('Image must be available'))

        (instances, resv_id) = self.compute_api.create(context,
            instance_type=instance_types.get_instance_type_by_name(
                kwargs.get('instance_type', None)),
            image_href=image_uuid,
            min_count=int(kwargs.get('min_count', max_count)),
            max_count=max_count,
            kernel_id=kwargs.get('kernel_id'),
            ramdisk_id=kwargs.get('ramdisk_id'),
            key_name=kwargs.get('key_name'),
            user_data=kwargs.get('user_data'),
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                                          'availability_zone'),
            block_device_mapping=kwargs.get('block_device_mapping', {}))
        return self._format_run_instances(context, resv_id)

    def terminate_instances(self, context, instance_id, **kwargs):
        """Terminate each instance in instance_id, which is a list of ec2 ids.
        instance_id is a kwarg so its name cannot be modified."""
        LOG.debug(_("Going to start terminating instances"))
        previous_states = []
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            previous_states.append(instance)
            self.compute_api.delete(context, instance)
        return self._format_terminate_instances(context, instance_id,
                                                previous_states)

    def reboot_instances(self, context, instance_id, **kwargs):
        """instance_id is a list of instance ids"""
        LOG.audit(_("Reboot instance %r"), instance_id, context=context)
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            self.compute_api.reboot(context, instance, 'HARD')
        return True

    def stop_instances(self, context, instance_id, **kwargs):
        """Stop each instances in instance_id.
        Here instance_id is a list of instance ids"""
        LOG.debug(_("Going to stop instances"))
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            self.compute_api.stop(context, instance)
        return True

    def start_instances(self, context, instance_id, **kwargs):
        """Start each instances in instance_id.
        Here instance_id is a list of instance ids"""
        LOG.debug(_("Going to start instances"))
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            self.compute_api.start(context, instance)
        return True

    def _get_image(self, context, ec2_id):
        # Resolve an EC2 image id (or name) to an image dict, verifying
        # the id's type prefix matches the image's container format.
        try:
            internal_id = ec2utils.ec2_id_to_id(ec2_id)
            image = self.image_service.show(context, internal_id)
        except (exception.InvalidEc2Id, exception.ImageNotFound):
            # Fall back to a lookup by name.
            try:
                return self.image_service.show_by_name(context, ec2_id)
            except exception.NotFound:
                raise exception.ImageNotFound(image_id=ec2_id)
        image_type = ec2_id.split('-')[0]
        if ec2utils.image_type(image.get('container_format')) != image_type:
            raise exception.ImageNotFound(image_id=ec2_id)
        return image

    def _format_image(self, image):
        """Convert from format defined by GlanceImageService to S3 format."""
        i = {}
        image_type = ec2utils.image_type(image.get('container_format'))
        ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
        name = image.get('name')
        i['imageId'] = ec2_id
        kernel_id = image['properties'].get('kernel_id')
        if kernel_id:
            i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
        ramdisk_id = image['properties'].get('ramdisk_id')
        if ramdisk_id:
            i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')

        if FLAGS.auth_strategy == 'deprecated':
            i['imageOwnerId'] = image['properties'].get('project_id')
        else:
            i['imageOwnerId'] = image.get('owner')

        img_loc = image['properties'].get('image_location')
        if img_loc:
            i['imageLocation'] = img_loc
        else:
            i['imageLocation'] = "%s (%s)" % (img_loc, name)

        i['name'] = name
        if not name and img_loc:
            # This should only occur for images registered with ec2 api
            # prior to that api populating the glance name
            i['name'] = img_loc

        i['imageState'] = self._get_image_state(image)
        i['description'] = image.get('description')
        display_mapping = {'aki': 'kernel',
                           'ari': 'ramdisk',
                           'ami': 'machine'}
        i['imageType'] = display_mapping.get(image_type)
        # 'not not' coerces the flag to a strict boolean.
        i['isPublic'] = not not image.get('is_public')
        i['architecture'] = image['properties'].get('architecture')

        properties = image['properties']
        root_device_name = block_device.properties_root_device_name(properties)
        root_device_type = 'instance-store'
        for bdm in properties.get('block_device_mapping', []):
            if (bdm.get('device_name') == root_device_name and
                ('snapshot_id' in bdm or 'volume_id' in bdm) and
                not bdm.get('no_device')):
                root_device_type = 'ebs'
        i['rootDeviceName'] = (root_device_name or
                               block_device.DEFAULT_ROOT_DEV_NAME)
        i['rootDeviceType'] = root_device_type

        _format_mappings(properties, i)

        return i

    def describe_images(self, context, image_id=None, **kwargs):
        """EC2 DescribeImages: list images, optionally filtered by ids."""
        # NOTE: image_id is a list!
        if image_id:
            images = []
            for ec2_id in image_id:
                try:
                    image = self._get_image(context, ec2_id)
                except exception.NotFound:
                    raise exception.ImageNotFound(image_id=ec2_id)
                images.append(image)
        else:
            images = self.image_service.detail(context)
        images = [self._format_image(i) for i in images]
        return {'imagesSet': images}

    def deregister_image(self, context, image_id, **kwargs):
        """Delete the image identified by the EC2 image id."""
        LOG.audit(_("De-registering image %s"), image_id, context=context)
        image = self._get_image(context, image_id)
        internal_id = image['id']
        self.image_service.delete(context, internal_id)
        return {'imageId': image_id}

    def _register_image(self, context, metadata):
        # Create the image record and return its EC2-formatted id.
        image = self.image_service.create(context, metadata)
        image_type = ec2utils.image_type(image.get('container_format'))
        image_id = ec2utils.image_ec2_id(image['id'],
                                         image_type)
        return image_id

    def register_image(self, context, image_location=None, **kwargs):
        """EC2 RegisterImage: register an image from an S3 location
        (or a name), including optional root device and block device
        mapping properties."""
        if image_location is None and kwargs.get('name'):
            image_location = kwargs['name']
        if image_location is None:
            raise exception.EC2APIError(_('imageLocation is required'))

        metadata = {'properties': {'image_location': image_location}}

        if kwargs.get('name'):
            metadata['name'] = kwargs['name']
        else:
            metadata['name'] = image_location

        if 'root_device_name' in kwargs:
            metadata['properties']['root_device_name'] = kwargs.get(
                                                         'root_device_name')

        mappings = [_parse_block_device_mapping(bdm) for bdm in
                    kwargs.get('block_device_mapping', [])]
        if mappings:
            metadata['properties']['block_device_mapping'] = mappings

        image_id = self._register_image(context, metadata)
        msg = _("Registered image %(image_location)s with"
                " id %(image_id)s") % locals()
        LOG.audit(msg, context=context)
        return {'imageId': image_id}

    def describe_image_attribute(self, context, image_id, attribute,
                                 **kwargs):
        """Return a single attribute of an image, dispatched through a
        table of per-attribute formatter closures."""
        def _block_device_mapping_attribute(image, result):
            _format_mappings(image['properties'], result)

        def _launch_permission_attribute(image, result):
            result['launchPermission'] = []
            if image['is_public']:
                result['launchPermission'].append({'group': 'all'})

        def _root_device_name_attribute(image, result):
            _prop_root_dev_name = block_device.properties_root_device_name
            result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
            if result['rootDeviceName'] is None:
                result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME

        supported_attributes = {
            'blockDeviceMapping': _block_device_mapping_attribute,
            'launchPermission': _launch_permission_attribute,
            'rootDeviceName': _root_device_name_attribute,
            }

        fn = supported_attributes.get(attribute)
        if fn is None:
            raise exception.EC2APIError(_('attribute not supported: %s') %
                                        attribute)
        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        result = {'imageId': image_id}
        fn(image, result)
        return result

    def modify_image_attribute(self, context, image_id, attribute,
                               operation_type, **kwargs):
        # TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if not 'user_group' in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care of it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): restart_instance = True self.compute_api.stop(context, instance) # wait instance for really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance with in %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
snapshot = self.volume_api.create_snapshot_force( context, volume, volume['display_name'], volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
./CrossVul/dataset_final_sorted/CWE-20/py/good_3659_0
crossvul-python_data_bad_3499_1
404: Not Found
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3499_1
crossvul-python_data_bad_3683_0
# Copyright 2012, Piston Cloud Computing, Inc.
# Copyright 2012, OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import netaddr

from nova.compute import api as compute
from nova.scheduler import filters


class AffinityFilter(filters.BaseHostFilter):
    """Shared base for filters that place instances relative to other
    instances (or to a host subnet)."""

    def __init__(self):
        self.compute_api = compute.API()

    def _affinity_host(self, context, instance_id):
        """Return the name of the host currently running *instance_id*."""
        return self.compute_api.get(context, instance_id)['host']


class DifferentHostFilter(AffinityFilter):
    '''Schedule the instance on a different host from a set of instances.'''

    def host_passes(self, host_state, filter_properties):
        """Pass only if this host runs none of the hinted instances."""
        ctxt = filter_properties['context']
        hints = filter_properties.get('scheduler_hints') or {}
        candidate = host_state.host

        avoid = hints.get('different_host', [])
        # A single uuid may be passed as a bare string; normalize to a list.
        if isinstance(avoid, basestring):
            avoid = [avoid]
        if not avoid:
            # With no different_host key
            return True
        # Equivalent to "not any(host == candidate)".
        return all(self._affinity_host(ctxt, uuid) != candidate
                   for uuid in avoid)


class SameHostFilter(AffinityFilter):
    '''Schedule the instance on the same host as another instance in a set of
    of instances.
    '''

    def host_passes(self, host_state, filter_properties):
        """Pass only if this host runs at least one hinted instance."""
        ctxt = filter_properties['context']
        hints = filter_properties.get('scheduler_hints') or {}
        candidate = host_state.host

        wanted = hints.get('same_host', [])
        # A single uuid may be passed as a bare string; normalize to a list.
        if isinstance(wanted, basestring):
            wanted = [wanted]
        if not wanted:
            # With no same_host key
            return True
        return any(self._affinity_host(ctxt, uuid) == candidate
                   for uuid in wanted)


class SimpleCIDRAffinityFilter(AffinityFilter):
    """Pass hosts whose IP falls in the subnet of a hinted host address."""

    def host_passes(self, host_state, filter_properties):
        hints = filter_properties.get('scheduler_hints') or {}
        cidr_suffix = hints.get('cidr', '/24')
        near_host_addr = hints.get('build_near_host_ip')
        host_ip = host_state.capabilities.get('host_ip')
        if near_host_addr:
            subnet = netaddr.IPNetwork(''.join((near_host_addr, cidr_suffix)))
            return netaddr.IPAddress(host_ip) in subnet

        # We don't have an affinity host address.
        return True
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3683_0
crossvul-python_data_bad_117_2
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

from cryptography import utils
from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives.ciphers import modes


@utils.register_interface(ciphers.CipherContext)
@utils.register_interface(ciphers.AEADCipherContext)
@utils.register_interface(ciphers.AEADEncryptionContext)
@utils.register_interface(ciphers.AEADDecryptionContext)
class _CipherContext(object):
    """Symmetric cipher context backed by an OpenSSL ``EVP_CIPHER_CTX``.

    One instance handles a single encryption or decryption operation
    (selected by ``operation``) and drives libcrypto through the backend's
    cffi bindings (``self._backend._lib`` / ``self._backend._ffi``).
    """

    # Values passed as the ``enc`` argument of EVP_CipherInit_ex.
    _ENCRYPT = 1
    _DECRYPT = 0

    def __init__(self, backend, cipher, mode, operation):
        """Create and fully initialize the EVP context.

        ``cipher``/``mode`` are the high-level primitive objects; ``operation``
        is ``_ENCRYPT`` or ``_DECRYPT``. Raises UnsupportedAlgorithm if the
        backend has no EVP cipher for the (cipher, mode) pair.
        """
        self._backend = backend
        self._cipher = cipher
        self._mode = mode
        self._operation = operation
        self._tag = None

        # Stream ciphers have no block size; treat them as 1-byte blocks so
        # the scratch-buffer sizing in update()/finalize() still works.
        if isinstance(self._cipher, ciphers.BlockCipherAlgorithm):
            self._block_size_bytes = self._cipher.block_size // 8
        else:
            self._block_size_bytes = 1

        # ffi.gc ties EVP_CIPHER_CTX_free to the Python object's lifetime,
        # so the native context cannot leak.
        ctx = self._backend._lib.EVP_CIPHER_CTX_new()
        ctx = self._backend._ffi.gc(
            ctx, self._backend._lib.EVP_CIPHER_CTX_free
        )

        # The registry maps (cipher class, mode class) -> adapter returning
        # the concrete EVP_CIPHER pointer.
        registry = self._backend._cipher_registry
        try:
            adapter = registry[type(cipher), type(mode)]
        except KeyError:
            raise UnsupportedAlgorithm(
                "cipher {0} in {1} mode is not supported "
                "by this backend.".format(
                    cipher.name, mode.name if mode else mode),
                _Reasons.UNSUPPORTED_CIPHER
            )

        evp_cipher = adapter(self._backend, cipher, mode)
        if evp_cipher == self._backend._ffi.NULL:
            raise UnsupportedAlgorithm(
                "cipher {0} in {1} mode is not supported "
                "by this backend.".format(
                    cipher.name, mode.name if mode else mode),
                _Reasons.UNSUPPORTED_CIPHER
            )

        # Pick whichever IV/tweak/nonce attribute the mode (or, failing
        # that, the cipher itself) provides; NULL if the mode needs none.
        if isinstance(mode, modes.ModeWithInitializationVector):
            iv_nonce = mode.initialization_vector
        elif isinstance(mode, modes.ModeWithTweak):
            iv_nonce = mode.tweak
        elif isinstance(mode, modes.ModeWithNonce):
            iv_nonce = mode.nonce
        elif isinstance(cipher, modes.ModeWithNonce):
            iv_nonce = cipher.nonce
        else:
            iv_nonce = self._backend._ffi.NULL

        # begin init with cipher and operation type
        res = self._backend._lib.EVP_CipherInit_ex(ctx, evp_cipher,
                                                   self._backend._ffi.NULL,
                                                   self._backend._ffi.NULL,
                                                   self._backend._ffi.NULL,
                                                   operation)
        self._backend.openssl_assert(res != 0)
        # set the key length to handle variable key ciphers
        res = self._backend._lib.EVP_CIPHER_CTX_set_key_length(
            ctx, len(cipher.key)
        )
        self._backend.openssl_assert(res != 0)
        if isinstance(mode, modes.GCM):
            # GCM needs its IV length set explicitly before the key/IV init.
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                ctx, self._backend._lib.EVP_CTRL_AEAD_SET_IVLEN,
                len(iv_nonce), self._backend._ffi.NULL
            )
            self._backend.openssl_assert(res != 0)
            if mode.tag is not None:
                # Tag supplied up front (decryption): hand it to OpenSSL now.
                res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                    ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG,
                    len(mode.tag), mode.tag
                )
                self._backend.openssl_assert(res != 0)
                self._tag = mode.tag
            elif (
                self._operation == self._DECRYPT and
                self._backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
                not self._backend._lib.CRYPTOGRAPHY_IS_LIBRESSL
            ):
                # Deferred tag (finalize_with_tag) only works on >= 1.0.2.
                raise NotImplementedError(
                    "delayed passing of GCM tag requires OpenSSL >= 1.0.2."
                    " To use this feature please update OpenSSL"
                )

        # pass key/iv
        # Second init call: cipher/operation were set above, so only the
        # key and IV/nonce are supplied here (after the key-length ctrl).
        res = self._backend._lib.EVP_CipherInit_ex(
            ctx, self._backend._ffi.NULL, self._backend._ffi.NULL,
            cipher.key, iv_nonce, operation
        )
        self._backend.openssl_assert(res != 0)
        # We purposely disable padding here as it's handled higher up in the
        # API.
        self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0)
        self._ctx = ctx

    def update(self, data):
        """Process *data* and return the resulting bytes.

        Allocates a worst-case scratch buffer (input length plus one block
        minus one byte) and delegates to update_into().
        """
        buf = bytearray(len(data) + self._block_size_bytes - 1)
        n = self.update_into(data, buf)
        return bytes(buf[:n])

    def update_into(self, data, buf):
        """Process *data*, writing output into caller-provided *buf*.

        Returns the number of bytes written. *buf* must hold at least
        len(data) + block_size - 1 bytes or ValueError is raised.
        """
        if len(buf) < (len(data) + self._block_size_bytes - 1):
            raise ValueError(
                "buffer must be at least {0} bytes for this "
                "payload".format(len(data) + self._block_size_bytes - 1)
            )

        buf = self._backend._ffi.cast(
            "unsigned char *", self._backend._ffi.from_buffer(buf)
        )
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherUpdate(self._ctx, buf, outlen,
                                                  data, len(data))
        self._backend.openssl_assert(res != 0)
        return outlen[0]

    def finalize(self):
        """Finish the operation and return any remaining output bytes.

        For GCM encryption this also captures the authentication tag
        (exposed via ``self.tag``); for GCM decryption a bad tag raises
        InvalidTag.
        """
        # OpenSSL 1.0.1 on Ubuntu 12.04 (and possibly other distributions)
        # appears to have a bug where you must make at least one call to update
        # even if you are only using authenticate_additional_data or the
        # GCM tag will be wrong. An (empty) call to update resolves this
        # and is harmless for all other versions of OpenSSL.
        if isinstance(self._mode, modes.GCM):
            self.update(b"")

        if (
            self._operation == self._DECRYPT and
            isinstance(self._mode, modes.ModeWithAuthenticationTag) and
            self.tag is None
        ):
            raise ValueError(
                "Authentication tag must be provided when decrypting."
            )

        buf = self._backend._ffi.new("unsigned char[]", self._block_size_bytes)
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen)
        if res == 0:
            errors = self._backend._consume_errors()

            # A GCM failure with an empty error stack means tag mismatch.
            if not errors and isinstance(self._mode, modes.GCM):
                raise InvalidTag

            # Any other failure is expected to be a block-alignment error;
            # openssl_assert blows up if the error stack says otherwise.
            self._backend.openssl_assert(
                errors[0]._lib_reason_match(
                    self._backend._lib.ERR_LIB_EVP,
                    self._backend._lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH
                )
            )
            raise ValueError(
                "The length of the provided data is not a multiple of "
                "the block length."
            )

        if (isinstance(self._mode, modes.GCM) and
                self._operation == self._ENCRYPT):
            # Pull the computed tag out of the context after finalization.
            tag_buf = self._backend._ffi.new(
                "unsigned char[]", self._block_size_bytes
            )
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                self._ctx, self._backend._lib.EVP_CTRL_AEAD_GET_TAG,
                self._block_size_bytes, tag_buf
            )
            self._backend.openssl_assert(res != 0)
            self._tag = self._backend._ffi.buffer(tag_buf)[:]

        res = self._backend._lib.EVP_CIPHER_CTX_cleanup(self._ctx)
        self._backend.openssl_assert(res == 1)
        return self._backend._ffi.buffer(buf)[:outlen[0]]

    def finalize_with_tag(self, tag):
        """Set the expected AEAD tag, then finalize (decryption path).

        NOTE(review): there is no minimum-length check on *tag* here — a
        truncated tag is passed straight to EVP_CTRL_AEAD_SET_TAG, and
        OpenSSL will verify however many bytes it is given. Confirm whether
        callers enforce a minimum tag length; otherwise this weakens GCM
        authentication.
        """
        if (
            self._backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
            not self._backend._lib.CRYPTOGRAPHY_IS_LIBRESSL
        ):
            raise NotImplementedError(
                "finalize_with_tag requires OpenSSL >= 1.0.2. To use this "
                "method please update OpenSSL"
            )
        res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
            self._ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG,
            len(tag), tag
        )
        self._backend.openssl_assert(res != 0)
        self._tag = tag
        return self.finalize()

    def authenticate_additional_data(self, data):
        """Feed additional authenticated data (AAD).

        Uses the EVP idiom of calling EVP_CipherUpdate with a NULL output
        buffer so *data* is authenticated but produces no ciphertext.
        """
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherUpdate(
            self._ctx, self._backend._ffi.NULL, outlen, data, len(data)
        )
        self._backend.openssl_assert(res != 0)

    # Read-only view of the authentication tag (None until available).
    tag = utils.read_only_property("_tag")
./CrossVul/dataset_final_sorted/CWE-20/py/bad_117_2
crossvul-python_data_bad_3660_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova.openstack.common import excutils from nova.openstack.common import importutils from nova import quota from nova import utils from nova import volume FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | 
shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.REBUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETE: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.SHUTOFF: inst_state.SHUTOFF, vm_states.MIGRATING: inst_state.MIGRATE, vm_states.RESIZING: inst_state.RESIZE, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, } def _state_description(vm_state, shutdown_terminate): """Map the vm state to the server status string""" if (vm_state == vm_states.SHUTOFF and not shutdown_terminate): name = inst_state.STOPPED else: name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) return {'code': inst_state.name_to_code(name), 'name': name} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDevicedMapping.<N>.DeviceName BlockDevicedMapping.<N>.Ebs.SnapshotId BlockDevicedMapping.<N>.Ebs.VolumeSize BlockDevicedMapping.<N>.Ebs.DeleteOnTermination BlockDevicedMapping.<N>.Ebs.NoDevice BlockDevicedMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_vol_id_to_uuid(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = 
bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. 
""" def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.keypair_api = compute.api.KeypairAPI() self.sgh = importutils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: 
rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) 
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = self.keypair_api.get_key_pairs(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) try: keypair = self.keypair_api.create_key_pair(context, context.user_id, key_name) except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise exception.EC2APIError(msg) except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.EC2APIError(msg) except exception.KeyPairExists: msg = _("Key pair '%s' already exists.") % key_name raise exception.KeyPairExists(msg) return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint'], 'keyMaterial': keypair['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) public_key = base64.b64decode(public_key_material) try: keypair = self.keypair_api.import_key_pair(context, context.user_id, key_name, public_key) except 
exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise exception.EC2APIError(msg) except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.EC2APIError(msg) except exception.KeyPairExists: msg = _("Key pair '%s' already exists.") % key_name raise exception.EC2APIError(msg) return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint']} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: self.keypair_api.delete_key_pair(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return {'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, 
max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, 
source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.EC2APIError(_("Invalid CIDR")) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if source_security_group_name: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise 
exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. """ for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid 
rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) rule_ids.append(rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if 
self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) count = QUOTAS.count(context, 'security_group_rules', security_group['id']) try: QUOTAS.limit_check(context, security_group_rules=count + 1) except exception.OverQuota: msg = _("Quota exceeded, too many security group rules.") raise exception.EC2APIError(msg) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if isinstance(group_name, unicode): group_name = group_name.encode('utf-8') # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. if FLAGS.ec2_strict_validation: # EC2 specification gives constraints for name and description: # Accepts alphanumeric characters, spaces, dashes, and underscores err = _("Value (%(value)s) for parameter %(param)s is invalid." 
" Content limited to Alphanumeric characters," " spaces, dashes, and underscores.") if not re.match('^[a-zA-Z0-9_\- ]+$', group_name): raise exception.InvalidParameterValue( err=err % {"value": group_name, "param": "GroupName"}) if not re.match('^[a-zA-Z0-9_\- ]+$', group_description): raise exception.InvalidParameterValue( err=err % {"value": group_description, "param": "GroupDescription"}) else: # Amazon accepts more symbols. # So, allow POSIX [:print:] characters. if not re.match(r'^[\x20-\x7E]+$', group_name): err = _("Value (%(value)s) for parameter %(param)s is invalid." " Content is limited to characters" " from the [:print:] class.") raise exception.InvalidParameterValue( err=err % {"value": group_name, "param": "GroupName"}) if len(group_name) > 255: err = _("Value (%s) for parameter GroupName is invalid." " Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) try: reservations = QUOTAS.reserve(context, security_groups=1) except exception.OverQuota: msg = _("Quota exceeded, too many security groups.") raise exception.EC2APIError(msg) try: group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) # Commit the reservation QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not 
enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") # Get reservations try: reservations = QUOTAS.reserve(context, security_groups=-1) except Exception: reservations = None LOG.exception(_("Failed to update usages deallocating " "security group")) LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) # Commit the reservations if reservations: QUOTAS.commit(context, reservations) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': 
volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance_uuid', None): instance_uuid = volume['instance_uuid'] instance = db.instance_get_by_uuid(context.elevated(), instance_uuid) instance_id = instance['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, instance['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = kwargs.get('availability_zone', None) volume = self.volume_api.create(context, size, None, None, snapshot, availability_zone=availability_zone) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete(context, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) try: instance = self.compute_api.detach_volume(context, volume_id=volume_id) except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if 
kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance['uuid'], tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': 
_format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id validate_ec2_id(instance_id) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: internal_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, internal_id) i['shutdownState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['shutdownState'] = 
_state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_uuid, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_uuid): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations 
= {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, 
i) self._format_instance_bdm(context, instance['uuid'], i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) zone = ec2utils.get_availability_zone_by_host(services, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, public_ip=None, **kwargs): if public_ip: floatings = [] for address in public_ip: floating = self.network_api.get_floating_ip_by_address(context, address) floatings.append(floating) else: floatings = self.network_api.get_floating_ips_by_project(context) addresses = [self._format_address(context, f) for f in floatings] return {'addressesSet': addresses} def _format_address(self, context, floating_ip): ec2_id = None if floating_ip['fixed_ip_id']: fixed_id = floating_ip['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_id'] is not None: ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) address = {'public_ip': floating_ip['address'], 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address['instance_id'], floating_ip['project_id']) address['instance_id'] = details return address def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) try: self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} except exception.FloatingIpNotFound: raise exception.EC2APIError(_('Unable to release 
IP Address.')) def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) try: self.compute_api.associate_floating_ip(context, instance, address=public_ip) return {'return': "true"} except exception.FloatingIpNotFound: raise exception.EC2APIError(_('Unable to associate IP Address.')) def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'return': "true"} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_href=image_uuid, min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), 
availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, resv_id) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) previous_states = [] for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) previous_states.append(instance) self.compute_api.delete(context, instance) return self._format_terminate_instances(context, instance_id, previous_states) def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.reboot(context, instance, 'HARD') return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instances in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to stop instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.stop(context, instance) return True def start_instances(self, context, instance_id, **kwargs): """Start each instances in instance_id. 
Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties 
= image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in 
kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if not 'user_group' in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care of it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): restart_instance = True self.compute_api.stop(context, instance) # wait instance for really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance with in %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
snapshot = self.volume_api.create_snapshot_force( context, volume, volume['display_name'], volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3660_0
crossvul-python_data_good_3768_0
from django.conf.urls.defaults import patterns from django.contrib.auth.urls import urlpatterns from django.contrib.auth.views import password_reset from django.contrib.auth.decorators import login_required from django.http import HttpResponse from django.template import Template, RequestContext from django.views.decorators.cache import never_cache @never_cache def remote_user_auth_view(request): "Dummy view for remote user tests" t = Template("Username is {{ user }}.") c = RequestContext(request, {}) return HttpResponse(t.render(c)) # special urls for auth test cases urlpatterns = urlpatterns + patterns('', (r'^logout/custom_query/$', 'django.contrib.auth.views.logout', dict(redirect_field_name='follow')), (r'^logout/next_page/$', 'django.contrib.auth.views.logout', dict(next_page='/somewhere/')), (r'^remote_user/$', remote_user_auth_view), (r'^password_reset_from_email/$', 'django.contrib.auth.views.password_reset', dict(from_email='staffmember@example.com')), (r'^admin_password_reset/$', 'django.contrib.auth.views.password_reset', dict(is_admin_site=True)), (r'^login_required/$', login_required(password_reset)), (r'^login_required_login_url/$', login_required(password_reset, login_url='/somewhere/')), )
./CrossVul/dataset_final_sorted/CWE-20/py/good_3768_0
crossvul-python_data_bad_1740_2
"""Tornado handlers for the contents web service. Preliminary documentation at https://github.com/ipython/ipython/wiki/IPEP-27%3A-Contents-Service """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json from tornado import gen, web from notebook.utils import url_path_join, url_escape from jupyter_client.jsonutil import date_default from notebook.base.handlers import ( IPythonHandler, APIHandler, json_errors, path_regex, ) def sort_key(model): """key function for case-insensitive sort by name and type""" iname = model['name'].lower() type_key = { 'directory' : '0', 'notebook' : '1', 'file' : '2', }.get(model['type'], '9') return u'%s%s' % (type_key, iname) def validate_model(model, expect_content): """ Validate a model returned by a ContentsManager method. If expect_content is True, then we expect non-null entries for 'content' and 'format'. """ required_keys = { "name", "path", "type", "writable", "created", "last_modified", "mimetype", "content", "format", } missing = required_keys - set(model.keys()) if missing: raise web.HTTPError( 500, u"Missing Model Keys: {missing}".format(missing=missing), ) maybe_none_keys = ['content', 'format'] if model['type'] == 'file': # mimetype should be populated only for file models maybe_none_keys.append('mimetype') if expect_content: errors = [key for key in maybe_none_keys if model[key] is None] if errors: raise web.HTTPError( 500, u"Keys unexpectedly None: {keys}".format(keys=errors), ) else: errors = { key: model[key] for key in maybe_none_keys if model[key] is not None } if errors: raise web.HTTPError( 500, u"Keys unexpectedly not None: {keys}".format(keys=errors), ) class ContentsHandler(APIHandler): SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE') def location_url(self, path): """Return the full URL location of a file. Parameters ---------- path : unicode The API path of the file, such as "foo/bar.txt". 
""" return url_escape(url_path_join( self.base_url, 'api', 'contents', path )) def _finish_model(self, model, location=True): """Finish a JSON request with a model, setting relevant headers, etc.""" if location: location = self.location_url(model['path']) self.set_header('Location', location) self.set_header('Last-Modified', model['last_modified']) self.set_header('Content-Type', 'application/json') self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors @gen.coroutine def get(self, path=''): """Return a model for a file or directory. A directory model contains a list of models (without content) of the files and directories it contains. """ path = path or '' type = self.get_query_argument('type', default=None) if type not in {None, 'directory', 'file', 'notebook'}: raise web.HTTPError(400, u'Type %r is invalid' % type) format = self.get_query_argument('format', default=None) if format not in {None, 'text', 'base64'}: raise web.HTTPError(400, u'Format %r is invalid' % format) content = self.get_query_argument('content', default='1') if content not in {'0', '1'}: raise web.HTTPError(400, u'Content %r is invalid' % content) content = int(content) model = yield gen.maybe_future(self.contents_manager.get( path=path, type=type, format=format, content=content, )) if model['type'] == 'directory' and content: # group listing by type, then by name (case-insensitive) # FIXME: sorting should be done in the frontends model['content'].sort(key=sort_key) validate_model(model, expect_content=content) self._finish_model(model, location=False) @web.authenticated @json_errors @gen.coroutine def patch(self, path=''): """PATCH renames a file or directory without re-uploading content.""" cm = self.contents_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, u'JSON body missing') model = yield gen.maybe_future(cm.update(model, path)) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def 
_copy(self, copy_from, copy_to=None): """Copy a file, optionally specifying a target directory.""" self.log.info(u"Copying {copy_from} to {copy_to}".format( copy_from=copy_from, copy_to=copy_to or '', )) model = yield gen.maybe_future(self.contents_manager.copy(copy_from, copy_to)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _upload(self, model, path): """Handle upload of a new file to path""" self.log.info(u"Uploading file to %s", path) model = yield gen.maybe_future(self.contents_manager.new(model, path)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _new_untitled(self, path, type='', ext=''): """Create a new, empty untitled entity""" self.log.info(u"Creating new %s in %s", type or 'file', path) model = yield gen.maybe_future(self.contents_manager.new_untitled(path=path, type=type, ext=ext)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _save(self, model, path): """Save an existing file.""" self.log.info(u"Saving file at %s", path) model = yield gen.maybe_future(self.contents_manager.save(model, path)) validate_model(model, expect_content=False) self._finish_model(model) @web.authenticated @json_errors @gen.coroutine def post(self, path=''): """Create a new file in the specified path. POST creates new files. The server always decides on the name. POST /api/contents/path New untitled, empty file or directory. 
POST /api/contents/path with body {"copy_from" : "/path/to/OtherNotebook.ipynb"} New copy of OtherNotebook in path """ cm = self.contents_manager if cm.file_exists(path): raise web.HTTPError(400, "Cannot POST to files, use PUT instead.") if not cm.dir_exists(path): raise web.HTTPError(404, "No such directory: %s" % path) model = self.get_json_body() if model is not None: copy_from = model.get('copy_from') ext = model.get('ext', '') type = model.get('type', '') if copy_from: yield self._copy(copy_from, path) else: yield self._new_untitled(path, type=type, ext=ext) else: yield self._new_untitled(path) @web.authenticated @json_errors @gen.coroutine def put(self, path=''): """Saves the file in the location specified by name and path. PUT is very similar to POST, but the requester specifies the name, whereas with POST, the server picks the name. PUT /api/contents/path/Name.ipynb Save notebook at ``path/Name.ipynb``. Notebook structure is specified in `content` key of JSON request body. If content is not specified, create a new empty notebook. 
""" model = self.get_json_body() if model: if model.get('copy_from'): raise web.HTTPError(400, "Cannot copy with PUT, only POST") exists = yield gen.maybe_future(self.contents_manager.file_exists(path)) if exists: yield gen.maybe_future(self._save(model, path)) else: yield gen.maybe_future(self._upload(model, path)) else: yield gen.maybe_future(self._new_untitled(path)) @web.authenticated @json_errors @gen.coroutine def delete(self, path=''): """delete a file in the given path""" cm = self.contents_manager self.log.warn('delete %s', path) yield gen.maybe_future(cm.delete(path)) self.set_status(204) self.finish() class CheckpointsHandler(APIHandler): SUPPORTED_METHODS = ('GET', 'POST') @web.authenticated @json_errors @gen.coroutine def get(self, path=''): """get lists checkpoints for a file""" cm = self.contents_manager checkpoints = yield gen.maybe_future(cm.list_checkpoints(path)) data = json.dumps(checkpoints, default=date_default) self.finish(data) @web.authenticated @json_errors @gen.coroutine def post(self, path=''): """post creates a new checkpoint""" cm = self.contents_manager checkpoint = yield gen.maybe_future(cm.create_checkpoint(path)) data = json.dumps(checkpoint, default=date_default) location = url_path_join(self.base_url, 'api/contents', path, 'checkpoints', checkpoint['id']) self.set_header('Location', url_escape(location)) self.set_status(201) self.finish(data) class ModifyCheckpointsHandler(APIHandler): SUPPORTED_METHODS = ('POST', 'DELETE') @web.authenticated @json_errors @gen.coroutine def post(self, path, checkpoint_id): """post restores a file from a checkpoint""" cm = self.contents_manager yield gen.maybe_future(cm.restore_checkpoint(checkpoint_id, path)) self.set_status(204) self.finish() @web.authenticated @json_errors @gen.coroutine def delete(self, path, checkpoint_id): """delete clears a checkpoint for a given file""" cm = self.contents_manager yield gen.maybe_future(cm.delete_checkpoint(checkpoint_id, path)) self.set_status(204) 
self.finish() class NotebooksRedirectHandler(IPythonHandler): """Redirect /api/notebooks to /api/contents""" SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH', 'POST', 'DELETE') def get(self, path): self.log.warn("/api/notebooks is deprecated, use /api/contents") self.redirect(url_path_join( self.base_url, 'api/contents', path )) put = patch = post = delete = get #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _checkpoint_id_regex = r"(?P<checkpoint_id>[\w-]+)" default_handlers = [ (r"/api/contents%s/checkpoints" % path_regex, CheckpointsHandler), (r"/api/contents%s/checkpoints/%s" % (path_regex, _checkpoint_id_regex), ModifyCheckpointsHandler), (r"/api/contents%s" % path_regex, ContentsHandler), (r"/api/notebooks/?(.*)", NotebooksRedirectHandler), ]
./CrossVul/dataset_final_sorted/CWE-20/py/bad_1740_2
crossvul-python_data_good_3659_1
# Copyright 2011 OpenStack LLC. # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security groups extension.""" import urllib from xml.dom import minidom from webob import exc import webob from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import utils LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'security_groups') def make_rule(elem): elem.set('id') elem.set('parent_group_id') proto = xmlutil.SubTemplateElement(elem, 'ip_protocol') proto.text = 'ip_protocol' from_port = xmlutil.SubTemplateElement(elem, 'from_port') from_port.text = 'from_port' to_port = xmlutil.SubTemplateElement(elem, 'to_port') to_port.text = 'to_port' group = xmlutil.SubTemplateElement(elem, 'group', selector='group') name = xmlutil.SubTemplateElement(group, 'name') name.text = 'name' tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id') tenant_id.text = 'tenant_id' ip_range = xmlutil.SubTemplateElement(elem, 'ip_range', selector='ip_range') cidr = xmlutil.SubTemplateElement(ip_range, 'cidr') cidr.text = 'cidr' def make_sg(elem): elem.set('id') elem.set('tenant_id') elem.set('name') desc = 
xmlutil.SubTemplateElement(elem, 'description') desc.text = 'description' rules = xmlutil.SubTemplateElement(elem, 'rules') rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules') make_rule(rule) sg_nsmap = {None: wsgi.XMLNS_V11} class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_group_rule', selector='security_group_rule') make_rule(root) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_group', selector='security_group') make_sg(root) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_groups') elem = xmlutil.SubTemplateElement(root, 'security_group', selector='security_groups') make_sg(elem) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group = {} sg_node = self.find_first_child_named(dom, 'security_group') if sg_node is not None: if sg_node.hasAttribute('name'): security_group['name'] = sg_node.getAttribute('name') desc_node = self.find_first_child_named(sg_node, "description") if desc_node: security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. 
""" def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} def _extract_security_group_rule(self, node): """Marshal the security group rule attribute of a parsed request""" sg_rule = {} sg_rule_node = self.find_first_child_named(node, 'security_group_rule') if sg_rule_node is not None: ip_protocol_node = self.find_first_child_named(sg_rule_node, "ip_protocol") if ip_protocol_node is not None: sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) from_port_node = self.find_first_child_named(sg_rule_node, "from_port") if from_port_node is not None: sg_rule['from_port'] = self.extract_text(from_port_node) to_port_node = self.find_first_child_named(sg_rule_node, "to_port") if to_port_node is not None: sg_rule['to_port'] = self.extract_text(to_port_node) parent_group_id_node = self.find_first_child_named(sg_rule_node, "parent_group_id") if parent_group_id_node is not None: sg_rule['parent_group_id'] = self.extract_text( parent_group_id_node) group_id_node = self.find_first_child_named(sg_rule_node, "group_id") if group_id_node is not None: sg_rule['group_id'] = self.extract_text(group_id_node) cidr_node = self.find_first_child_named(sg_rule_node, "cidr") if cidr_node is not None: sg_rule['cidr'] = self.extract_text(cidr_node) return sg_rule class SecurityGroupControllerBase(object): """Base class for Security Group controllers.""" def __init__(self): self.compute_api = compute.API() self.sgh = utils.import_object(FLAGS.security_group_handler) def _format_security_group_rule(self, context, rule): sg_rule = {} sg_rule['id'] = rule.id sg_rule['parent_group_id'] = rule.parent_group_id sg_rule['ip_protocol'] = rule.protocol sg_rule['from_port'] = rule.from_port sg_rule['to_port'] = rule.to_port sg_rule['group'] = {} sg_rule['ip_range'] = {} if rule.group_id: source_group = 
db.security_group_get(context, rule.group_id) sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: sg_rule['ip_range'] = {'cidr': rule.cidr} return sg_rule def _format_security_group(self, context, group): security_group = {} security_group['id'] = group.id security_group['description'] = group.description security_group['name'] = group.name security_group['tenant_id'] = group.project_id security_group['rules'] = [] for rule in group.rules: security_group['rules'] += [self._format_security_group_rule( context, rule)] return security_group class SecurityGroupController(SecurityGroupControllerBase): """The Security group API controller for the OpenStack API.""" def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: msg = _("Security group id should be integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) return security_group @wsgi.serializers(xml=SecurityGroupTemplate) def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) if db.security_group_in_use(context, security_group.id): msg = _("Security group is still in use") raise exc.HTTPBadRequest(explanation=msg) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh( context, security_group.id) return webob.Response(status_int=202) @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req): """Returns a list of security groups""" 
context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) groups = db.security_group_get_by_project(context, context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @wsgi.serializers(xml=SecurityGroupTemplate) @wsgi.deserializers(xml=SecurityGroupXMLDeserializer) def create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() security_group = body.get('security_group', None) if security_group is None: raise exc.HTTPUnprocessableEntity() group_name = security_group.get('name', None) group_description = security_group.get('description', None) self._validate_security_group_property(group_name, "name") self._validate_security_group_property(group_description, "description") group_name = group_name.strip() group_description = group_description.strip() if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exc.HTTPBadRequest(explanation=msg) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('Security group %s already exists') % group_name raise exc.HTTPBadRequest(explanation=msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'security_group': self._format_security_group(context, group_ref)} def _validate_security_group_property(self, value, typ): """ typ will be either 'name' or 'description', depending on the caller """ try: val = 
value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) if not val: msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) class SecurityGroupRulesController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupRuleTemplate) @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer) def create(self, req, body): context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() if not 'security_group_rule' in body: raise exc.HTTPUnprocessableEntity() self.compute_api.ensure_default_security_group(context) sg_rule = body['security_group_rule'] parent_group_id = sg_rule.get('parent_group_id', None) try: parent_group_id = int(parent_group_id) security_group = db.security_group_get(context, parent_group_id) except ValueError: msg = _("Parent group id is not integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Security group (%s) not found") % parent_group_id raise exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), parent_group_id=sg_rule.get('parent_group_id'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a " "valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): msg = _('This rule already exists in group %s') % parent_group_id 
raise exc.HTTPBadRequest(explanation=msg) allowed = quota.allowed_security_group_rules(context, parent_group_id, 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exc.HTTPBadRequest(explanation=msg) security_group_rule = db.security_group_rule_create(context, values) self.sgh.trigger_security_group_rule_create_refresh( context, [security_group_rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. """ for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return True return False def _rule_args_to_dict(self, context, to_port=None, from_port=None, parent_group_id=None, ip_protocol=None, cidr=None, group_id=None): values = {} if group_id is not None: try: parent_group_id = int(parent_group_id) group_id = int(group_id) except ValueError: msg = _("Parent or group id is not integer") raise exception.InvalidInput(reason=msg) values['group_id'] = group_id #check if groupId exists db.security_group_get(context, group_id) elif cidr: # If this fails, it throws an exception. This is what we want. try: cidr = urllib.unquote(cidr).decode() except Exception: raise exception.InvalidCidr(cidr=cidr) if not utils.is_valid_cidr(cidr): # Raise exception for non-valid address raise exception.InvalidCidr(cidr=cidr) values['cidr'] = cidr else: values['cidr'] = '0.0.0.0/0' if group_id: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and from_port > to_port): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol.lower() values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def delete(self, req, id): context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) try: id = int(id) rule = db.security_group_rule_get(context, id) except ValueError: msg = _("Rule id is not 
integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound: msg = _("Rule (%s) not found") % id raise exc.HTTPNotFound(explanation=msg) group_id = rule.parent_group_id self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get(context, group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) db.security_group_rule_destroy(context, rule['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, [rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return webob.Response(status_int=202) class ServerSecurityGroupController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) try: instance = self.compute_api.get(context, server_id) groups = db.security_group_get_by_instance(context, instance['id']) except exception.ApiError, e: raise webob.exc.HTTPBadRequest(explanation=e.message) except exception.NotAuthorized, e: raise webob.exc.HTTPUnauthorized() result = [self._format_security_group(context, group) for group in groups] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} class SecurityGroupActionController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SecurityGroupActionController, self).__init__(*args, **kwargs) self.compute_api = compute.API() self.sgh = utils.import_object(FLAGS.security_group_handler) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['addSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except 
KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.add_security_group(context, instance, group_name) self.sgh.trigger_instance_add_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['removeSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.remove_security_group(context, instance, group_name) self.sgh.trigger_instance_remove_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) class Security_groups(extensions.ExtensionDescriptor): """Security group support""" name = "SecurityGroups" alias = "security_groups" namespace = 
"http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" updated = "2011-07-21T00:00:00+00:00" def get_controller_extensions(self): controller = SecurityGroupActionController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def get_resources(self): resources = [] res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController()) resources.append(res) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController()) resources.append(res) res = extensions.ResourceExtension( 'os-security-groups', controller=ServerSecurityGroupController(), parent=dict(member_name='server', collection_name='servers')) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-20/py/good_3659_1
crossvul-python_data_good_5582_1
import os import os.path import mimetypes import requests from zope.interface import implements from pyramid.interfaces import ITemplateRenderer class ReleaseFileRenderer(object): implements(ITemplateRenderer) def __init__(self, repository_root): self.repository_root = repository_root def __call__(self, value, system): if 'request' in system: request = system['request'] mime, encoding = mimetypes.guess_type(value['filename']) request.response_content_type = mime if encoding: request.response_encoding = encoding f = os.path.join(self.repository_root, value['filename'][0].lower(), value['filename']) if not os.path.exists(f): dir_ = os.path.join(self.repository_root, value['filename'][0].lower()) if not os.path.exists(dir_): os.makedirs(dir_, 0750) if value['url'].startswith('https://pypi.python.org'): verify = os.path.join(os.path.dirname(__file__), 'pypi.pem') else: verify = value['url'].startswith('https:') resp = requests.get(value['url'], verify=verify) with open(f, 'wb') as rf: rf.write(resp.content) return resp.content else: data = '' with open(f, 'rb') as rf: data = '' while True: content = rf.read(2<<16) if not content: break data += content return data def renderer_factory(info): return ReleaseFileRenderer(info.settings['pyshop.repository'])
./CrossVul/dataset_final_sorted/CWE-20/py/good_5582_1
crossvul-python_data_good_3767_3
from __future__ import absolute_import, unicode_literals import copy import datetime from email.header import Header import os import re import sys import time import warnings from io import BytesIO from pprint import pformat try: from urllib.parse import quote, parse_qsl, urlencode, urljoin, urlparse except ImportError: # Python 2 from urllib import quote, urlencode from urlparse import parse_qsl, urljoin, urlparse from django.utils.six.moves import http_cookies # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"') # See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256 _tc = http_cookies.SimpleCookie() try: _tc.load(str('foo:bar=1')) _cookie_allows_colon_in_names = True except http_cookies.CookieError: _cookie_allows_colon_in_names = False if _cookie_encodes_correctly and _cookie_allows_colon_in_names: SimpleCookie = http_cookies.SimpleCookie else: Morsel = http_cookies.Morsel class SimpleCookie(http_cookies.SimpleCookie): if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",","\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. 
if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded if not _cookie_allows_colon_in_names: def load(self, rawdata): self.bad_cookies = set() super(SimpleCookie, self).load(force_str(rawdata)) for key in self.bad_cookies: del self[key] # override private __set() method: # (needed for using our Morsel, and for laxness with CookieError def _BaseCookie__set(self, key, real_value, coded_value): key = force_str(key) try: M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) except http_cookies.CookieError: self.bad_cookies.add(key) dict.__setitem__(self, key, http_cookies.Morsel()) from django.conf import settings from django.core import signing from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser from django.http.utils import * from django.utils.datastructures import MultiValueDict, ImmutableList from django.utils.encoding import force_bytes, force_str, force_text, iri_to_uri from django.utils.http import cookie_date from django.utils import six from django.utils import timezone RESERVED_CHARS="!*'();:@&=+$,/?%#[]" absolute_http_url_re = re.compile(r"^https?://", re.I) class Http404(Exception): pass RAISE_ERROR = object() def build_request_repr(request, path_override=None, GET_override=None, POST_override=None, COOKIES_override=None, META_override=None): """ Builds and returns the request's representation string. The request's attributes may be overridden by pre-processed values. """ # Since this is called as part of error handling, we need to be very # robust against potentially malformed input. 
try: get = (pformat(GET_override) if GET_override is not None else pformat(request.GET)) except Exception: get = '<could not parse>' if request._post_parse_error: post = '<could not parse>' else: try: post = (pformat(POST_override) if POST_override is not None else pformat(request.POST)) except Exception: post = '<could not parse>' try: cookies = (pformat(COOKIES_override) if COOKIES_override is not None else pformat(request.COOKIES)) except Exception: cookies = '<could not parse>' try: meta = (pformat(META_override) if META_override is not None else pformat(request.META)) except Exception: meta = '<could not parse>' path = path_override if path_override is not None else request.path return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % (request.__class__.__name__, path, six.text_type(get), six.text_type(post), six.text_type(cookies), six.text_type(meta))) class UnreadablePostError(IOError): pass class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {} self.path = '' self.path_info = '' self.method = None self._post_parse_error = False def __repr__(self): return build_request_repr(self) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) # Disallow potentially poisoned hostnames. 
if set(';/?@&=+$,').intersection(host): raise SuspiciousOperation('Invalid HTTP_HOST header: %s' % host) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '') def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempts to return a signed cookie. If the signature fails or the cookie has expired, raises an exception... unless you provide the default argument in which case that value will be returned instead. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. """ if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % ('https' if self.is_secure() else 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def _is_secure(self): return os.environ.get("HTTPS") == "on" def is_secure(self): # First, check the SECURE_PROXY_SSL_HEADER setting. if settings.SECURE_PROXY_SSL_HEADER: try: header, value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.') if self.META.get(header, None) == value: return True # Failing that, fall back to _is_secure(), which is a hook for # subclasses to implement. 
return self._is_secure() def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Sets the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning="You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise Exception("You cannot access body after reading from request's data stream") try: self._body = self.read() except IOError as e: six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2]) self._stream = BytesIO(self._body) return self._body @property def raw_post_data(self): warnings.warn('HttpRequest.raw_post_data has been deprecated. 
Use HttpRequest.body instead.', DeprecationWarning) return self.body def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): # Populates self._post and self._files if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart'): if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except: # An error occured while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occured. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise else: self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() ## File-like and iterator interface. ## ## Expects self._stream to be set to an appropriate source of bytes by ## a corresponding request subclass (e.g. WSGIRequest). ## Also when request data has already been read by request.POST or ## request.body, self._stream points to a BytesIO instance ## containing that data. def read(self, *args, **kwargs): self._read_started = True return self._stream.read(*args, **kwargs) def readline(self, *args, **kwargs): self._read_started = True return self._stream.readline(*args, **kwargs) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) class QueryDict(MultiValueDict): """ A specialized MultiValueDict that takes a query string when initialized. 
This is immutable unless you create a copy of it. Values retrieved from this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string, mutable=False, encoding=None): super(QueryDict, self).__init__() if not encoding: encoding = settings.DEFAULT_CHARSET self.encoding = encoding if six.PY3: for key, value in parse_qsl(query_string or '', keep_blank_values=True, encoding=encoding): self.appendlist(key, value) else: for key, value in parse_qsl(query_string or '', keep_blank_values=True): self.appendlist(force_text(key, encoding, errors='replace'), force_text(value, encoding, errors='replace')) self._mutable = mutable @property def encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding @encoding.setter def encoding(self, value): self._encoding = value def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super(QueryDict, self).__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in six.iterlists(self): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in six.iterlists(self): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = bytes_to_text(key, self.encoding) list_ = [bytes_to_text(elt, self.encoding) for elt in list_] super(QueryDict, 
self).setlist(key, list_) def setlistdefault(self, key, default_list=None): self._assert_mutable() return super(QueryDict, self).setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super(QueryDict, self).appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super(QueryDict, self).pop(key, *args) def popitem(self): self._assert_mutable() return super(QueryDict, self).popitem() def clear(self): self._assert_mutable() super(QueryDict, self).clear() def setdefault(self, key, default=None): self._assert_mutable() key = bytes_to_text(key, self.encoding) default = bytes_to_text(default, self.encoding) return super(QueryDict, self).setdefault(key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. :arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = force_bytes(safe, self.encoding) encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = force_bytes(k, self.encoding) output.extend([encode(k, force_bytes(v, self.encoding)) for v in list_]) return '&'.join(output) def parse_cookie(cookie): if cookie == '': return {} if not isinstance(cookie, http_cookies.BaseCookie): try: c = SimpleCookie() c.load(cookie) except http_cookies.CookieError: # Invalid cookie return {} else: c = cookie cookiedict = {} for key in c.keys(): cookiedict[key] = c.get(key).value return cookiedict class BadHeaderError(ValueError): pass class HttpResponse(object): """A basic HTTP response, with content and dictionary-accessed 
headers.""" status_code = 200 def __init__(self, content='', content_type=None, status=None, mimetype=None): # _headers is a mapping of the lower-case name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._charset = settings.DEFAULT_CHARSET if mimetype: warnings.warn("Using mimetype keyword argument is deprecated, use" " content_type instead", PendingDeprecationWarning) content_type = mimetype if not content_type: content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE, self._charset) # content is a bytestring. See the content property methods. self.content = content self.cookies = SimpleCookie() if status: self.status_code = status self['Content-Type'] = content_type def serialize(self): """Full HTTP message, including headers, as a bytestring.""" headers = [ ('%s: %s' % (key, value)).encode('us-ascii') for key, value in self._headers.values() ] return b'\r\n'.join(headers) + b'\r\n\r\n' + self.content if six.PY3: __bytes__ = serialize else: __str__ = serialize def _convert_to_charset(self, value, charset, mime_encode=False): """Converts headers key/value to ascii/latin1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` value can't be represented in the given charset, MIME-encoding is applied. """ if not isinstance(value, (bytes, six.text_type)): value = str(value) try: if six.PY3: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) else: # Convert bytestring using given charset value = value.decode(charset) else: if isinstance(value, str): # Ensure string is valid in given charset value.decode(charset) else: # Convert unicode string to given charset value = value.encode(charset) except UnicodeError as e: if mime_encode: # Wrapping in str() is a workaround for #12422 under Python 2. 
value = str(Header(value, 'utf-8').encode()) else: e.reason += ', HTTP response headers must be in %s format' % charset raise if str('\n') in value or str('\r') in value: raise BadHeaderError("Header values can't contain newlines (got %r)" % value) return value def __setitem__(self, header, value): header = self._convert_to_charset(header, 'ascii') value = self._convert_to_charset(value, 'latin1', mime_encode=True) self._headers[header.lower()] = (header, value) def __delitem__(self, header): try: del self._headers[header.lower()] except KeyError: pass def __getitem__(self, header): return self._headers[header.lower()][1] def __getstate__(self): # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we # serialise to a string instead state = self.__dict__.copy() state['cookies'] = str(state['cookies']) return state def __setstate__(self, state): self.__dict__.update(state) self.cookies = SimpleCookie(self.cookies) def has_header(self, header): """Case-insensitive check for a header.""" return header.lower() in self._headers __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate=None): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ Sets a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then ``max_age`` will be calculated. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_aware(expires): expires = timezone.make_naive(expires, timezone.utc) delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). 
delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = cookie_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None): self.set_cookie(key, max_age=0, path=path, domain=domain, expires='Thu, 01-Jan-1970 00:00:00 GMT') @property def content(self): if self.has_header('Content-Encoding'): def make_bytes(value): if isinstance(value, int): value = six.text_type(value) if isinstance(value, six.text_type): value = value.encode('ascii') # force conversion to bytes in case chunk is a subclass return bytes(value) return b''.join(make_bytes(e) for e in self._container) return b''.join(force_bytes(e, self._charset) for e in self._container) @content.setter def content(self, value): if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)): self._container = value self._base_content_is_iter = True else: self._container = [value] self._base_content_is_iter = False def __iter__(self): self._iterator = iter(self._container) return self def __next__(self): chunk = next(self._iterator) if isinstance(chunk, int): chunk = six.text_type(chunk) if isinstance(chunk, six.text_type): chunk = chunk.encode(self._charset) # force conversion to bytes in case chunk is a subclass return bytes(chunk) next = __next__ # Python 2 compatibility def 
close(self): if hasattr(self._container, 'close'): self._container.close() # The remaining methods partially implement the file-like object interface. # See http://docs.python.org/lib/bltin-file-objects.html def write(self, content): if self._base_content_is_iter: raise Exception("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if self._base_content_is_iter: raise Exception("This %s instance cannot tell its position" % self.__class__) return sum([len(chunk) for chunk in self]) class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to, *args, **kwargs): parsed = urlparse(redirect_to) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise SuspiciousOperation("Unsafe redirect to URL with protocol '%s'" % parsed.scheme) super(HttpResponseRedirectBase, self).__init__(*args, **kwargs) self['Location'] = iri_to_uri(redirect_to) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super(HttpResponseNotModified, self).__init__(*args, **kwargs) del self['content-type'] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError("You cannot set content to a 304 (Not Modified) response") self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super(HttpResponseNotAllowed, self).__init__(*args, **kwargs) self['Allow'] = ', '.join(permitted_methods) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): 
status_code = 500 # A backwards compatible alias for HttpRequest.get_host. def get_host(request): return request.get_host() # It's neither necessary nor appropriate to use # django.utils.encoding.smart_text for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Returns any non-basestring objects without change. """ if isinstance(s, bytes): return six.text_type(s, encoding, 'replace') else: return s
./CrossVul/dataset_final_sorted/CWE-20/py/good_3767_3
crossvul-python_data_bad_1800_0
404: Not Found
./CrossVul/dataset_final_sorted/CWE-20/py/bad_1800_0
crossvul-python_data_good_50_8
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import stat
import sys

#VERSION="2.1dev4"
VERSION="2.23.2"

# Taken from kennethreitz/requests/setup.py
package_directory = os.path.realpath(os.path.dirname(__file__))


def get_file_contents(file_path):
    """Get the context of the file using full path name."""
    content = ""
    try:
        full_path = os.path.join(package_directory, file_path)
        content = open(full_path, 'r').read()
    except Exception:
        # Deliberately best-effort: a missing/unreadable README must not
        # abort setup.  (Replaced a bare ``except:`` and a Python-2-only
        # ``print >> sys.stderr`` statement with forms valid on both
        # Python 2 and 3; the message written to stderr is unchanged.)
        sys.stderr.write("### could not open file {0!r}\n".format(file_path))
    return content


def get_file_list(file_path):
    """Return the files in *file_path*, each prefixed with that path."""
    full_path = os.path.join(package_directory, file_path)
    file_list = os.listdir(full_path)
    # now we need to add the path to the files
    return [file_path + f for f in file_list]


install_requires = ["Flask>=0.10.1",
                    "Flask-Migrate>=1.2.0",
                    "Flask-SQLAlchemy>=2.0",
                    "Flask-Script>=2.0.5",
                    "Jinja2>=2.7.3",
                    "Mako>=0.9.1",
                    "MarkupSafe>=0.23",
                    "PyMySQL>=0.6.6",
                    "Pillow>=2.6.1",
                    "PyJWT>=1.3.0",
                    "PyYAML>=3.11",
                    "Pygments>=2.0.2",
                    "SQLAlchemy>=1.0.5",
                    "Werkzeug>=0.10.4",
                    "alembic>=0.6.7",
                    "argparse>=1.2.1",
                    "bcrypt>=1.1.0",
                    "beautifulsoup4>=4.3.2",
                    "cffi>=0.8.6",
                    "configobj>=5.0.6",
                    "docutils>=0.12",
                    "funcparserlib>=0.3.6",
                    "itsdangerous>=0.24",
                    "ldap3>=2.5",
                    "netaddr>=0.7.12",
                    "passlib>=1.6.2",
                    "pyasn1>=0.4.2",
                    "pyOpenSSL>=0.15.1",
                    "pycparser>=2.10",
                    "pycrypto>=2.6.1",
                    "pyrad>=2.0",
                    "pyusb>=1.0.0b2",
                    "qrcode>=5.1",
                    "requests>=2.7.0",
                    "sqlsoup>=0.9.0",
                    "ecdsa>=0.13",
                    "lxml>=3.3",
                    "python-gnupg>=0.3.8",
                    "defusedxml>=0.4.1",
                    "flask-babel>=0.9",
                    "croniter>=0.3.8"
                    ]

# For python 2.6 we need additional dependency importlib
try:
    import importlib
except ImportError:
    install_requires.append('importlib')


def get_man_pages(dir):
    """
    Get man pages in a directory.

    :param dir: directory to scan (non-recursive)
    :return: list of file names ending in ``.1``, prefixed with *dir*
    """
    files = os.listdir(dir)
    r_files = []
    for file in files:
        if file.endswith(".1"):
            r_files.append(dir + "/" + file)
    return r_files


def get_scripts(dir):
    """
    Get files that are executable

    :param dir: directory to scan (non-recursive)
    :return: list of file names with the owner-execute bit set
    """
    files = os.listdir(dir)
    r_files = []
    for file in files:
        if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
            r_files.append(dir + "/" + file)
    return r_files


setup(name='privacyIDEA',
      version=VERSION,
      description='privacyIDEA: identity, multifactor authentication (OTP), '
                  'authorization, audit',
      author='privacyidea.org',
      license='AGPLv3',
      author_email='cornelius@privacyidea.org',
      url='http://www.privacyidea.org',
      keywords='OTP, two factor authentication, management, security',
      packages=find_packages(),
      scripts=["pi-manage"] + get_scripts("tools"),
      extras_require={
          'dev': ["Sphinx>=1.3.1",
                  "sphinxcontrib-httpdomain>=1.3.0"],
          'test': ["coverage>=3.7.1",
                   "mock>=1.0.1",
                   "pyparsing>=2.0.3",
                   "nose>=1.3.4",
                   "responses>=0.4.0",
                   "six>=1.8.0"],
      },
      install_requires=install_requires,
      include_package_data=True,
      data_files=[('etc/privacyidea/',
                   ['deploy/apache/privacyideaapp.wsgi',
                    'deploy/privacyidea/dictionary',
                    'deploy/privacyidea/enckey',
                    'deploy/privacyidea/private.pem',
                    'deploy/privacyidea/public.pem']),
                  ('share/man/man1', get_man_pages("tools")),
                  ('lib/privacyidea/authmodules/FreeRADIUS',
                   ["authmodules/FreeRADIUS/LICENSE",
                    "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
                  ('lib/privacyidea/authmodules/OTRS',
                   ["authmodules/OTRS/privacyIDEA.pm"]),
                  ('lib/privacyidea/migrations',
                   ["migrations/alembic.ini",
                    "migrations/env.py",
                    "migrations/README",
                    "migrations/script.py.mako"]),
                  ('lib/privacyidea/migrations/versions',
                   get_file_list("migrations/versions/"))
                  ],
      classifiers=["Framework :: Flask",
                   "License :: OSI Approved :: "
                   "GNU Affero General Public License v3",
                   "Programming Language :: Python",
                   "Development Status :: 5 - Production/Stable",
                   "Topic :: Internet",
                   "Topic :: Security",
                   "Topic :: System ::"
                   " Systems Administration :: Authentication/Directory"
                   ],
      #message_extractors={'privacyidea': [
      #        ('**.py', 'python', None),
      #        ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
      zip_safe=False,
      long_description=get_file_contents('README.rst')
      )
./CrossVul/dataset_final_sorted/CWE-20/py/good_50_8
crossvul-python_data_bad_2141_0
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

import os
import subprocess
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import utils
from ansible import errors
import sys


class InventoryScript(object):
    ''' Host inventory parser for ansible using external inventory scripts. '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        """Run ``<filename> --list`` and parse its JSON output into groups.

        :raises errors.AnsibleError: if the script cannot be executed.
        """
        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # "except OSError, e" (Python-2-only comma form) modernized to the
        # "as e" form, which is identical in meaning and valid on 2.6+ and 3.
        except OSError as e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        self.data = stdout
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)

    def _parse(self, err):
        """Parse the script's JSON into a name -> Group mapping.

        :param err: stderr captured from the script; echoed if parsing failed.
        :raises errors.AnsibleError: if the script reported a failure.
        """
        all_hosts = {}
        self.raw = utils.parse_json(self.data)
        all = Group('all')
        groups = dict(all=all)
        group = None

        if 'failed' in self.raw:
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None

            # Normalize shorthand forms: a bare list means 'hosts'; a dict
            # with neither 'hosts' nor 'vars' means group vars for a group
            # containing a single same-named host.
            if not isinstance(data, dict):
                data = {'hosts': data}
            elif not any(k in data for k in ('hosts', 'vars')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                for hostname in data['hosts']:
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                for k, v in data['vars'].iteritems():
                    if group.name == all.name:
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)

            if group.name != all.name:
                all.add_child_group(group)

        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])

        return groups

    def get_host_variables(self, host):
        """ Runs <script> --host <hostname> to determine additional host variables

        Uses the cached ``_meta.hostvars`` from --list when available instead
        of spawning the script once per host.
        """
        if self.host_vars_from_top is not None:
            got = self.host_vars_from_top.get(host.name, {})
            return got

        cmd = [self.filename, "--host", host.name]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (out, err) = sp.communicate()
        return utils.parse_json(out)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2141_0
crossvul-python_data_bad_50_8
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import stat
import sys

#VERSION="2.1dev4"
VERSION="2.23.1"

# Taken from kennethreitz/requests/setup.py
package_directory = os.path.realpath(os.path.dirname(__file__))


def get_file_contents(file_path):
    """Get the context of the file using full path name."""
    content = ""
    try:
        full_path = os.path.join(package_directory, file_path)
        content = open(full_path, 'r').read()
    except Exception:
        # Deliberately best-effort: a missing/unreadable README must not
        # abort setup.  (Replaced a bare ``except:`` and a Python-2-only
        # ``print >> sys.stderr`` statement with forms valid on both
        # Python 2 and 3; the message written to stderr is unchanged.)
        sys.stderr.write("### could not open file {0!r}\n".format(file_path))
    return content


def get_file_list(file_path):
    """Return the files in *file_path*, each prefixed with that path."""
    full_path = os.path.join(package_directory, file_path)
    file_list = os.listdir(full_path)
    # now we need to add the path to the files
    return [file_path + f for f in file_list]


install_requires = ["Flask>=0.10.1",
                    "Flask-Migrate>=1.2.0",
                    "Flask-SQLAlchemy>=2.0",
                    "Flask-Script>=2.0.5",
                    "Jinja2>=2.7.3",
                    "Mako>=0.9.1",
                    "MarkupSafe>=0.23",
                    "PyMySQL>=0.6.6",
                    "Pillow>=2.6.1",
                    "PyJWT>=1.3.0",
                    "PyYAML>=3.11",
                    "Pygments>=2.0.2",
                    "SQLAlchemy>=1.0.5",
                    "Werkzeug>=0.10.4",
                    "alembic>=0.6.7",
                    "argparse>=1.2.1",
                    "bcrypt>=1.1.0",
                    "beautifulsoup4>=4.3.2",
                    "cffi>=0.8.6",
                    "configobj>=5.0.6",
                    "docutils>=0.12",
                    "funcparserlib>=0.3.6",
                    "itsdangerous>=0.24",
                    "ldap3>=2.5",
                    "netaddr>=0.7.12",
                    "passlib>=1.6.2",
                    "pyasn1>=0.4.2",
                    "pyOpenSSL>=0.15.1",
                    "pycparser>=2.10",
                    "pycrypto>=2.6.1",
                    "pyrad>=2.0",
                    "pyusb>=1.0.0b2",
                    "qrcode>=5.1",
                    "requests>=2.7.0",
                    "sqlsoup>=0.9.0",
                    "ecdsa>=0.13",
                    "lxml>=3.3",
                    "python-gnupg>=0.3.8",
                    "defusedxml>=0.4.1",
                    "flask-babel>=0.9",
                    "croniter>=0.3.8"
                    ]

# For python 2.6 we need additional dependency importlib
try:
    import importlib
except ImportError:
    install_requires.append('importlib')


def get_man_pages(dir):
    """
    Get man pages in a directory.

    :param dir: directory to scan (non-recursive)
    :return: list of file names ending in ``.1``, prefixed with *dir*
    """
    files = os.listdir(dir)
    r_files = []
    for file in files:
        if file.endswith(".1"):
            r_files.append(dir + "/" + file)
    return r_files


def get_scripts(dir):
    """
    Get files that are executable

    :param dir: directory to scan (non-recursive)
    :return: list of file names with the owner-execute bit set
    """
    files = os.listdir(dir)
    r_files = []
    for file in files:
        if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
            r_files.append(dir + "/" + file)
    return r_files


setup(name='privacyIDEA',
      version=VERSION,
      description='privacyIDEA: identity, multifactor authentication (OTP), '
                  'authorization, audit',
      author='privacyidea.org',
      license='AGPLv3',
      author_email='cornelius@privacyidea.org',
      url='http://www.privacyidea.org',
      keywords='OTP, two factor authentication, management, security',
      packages=find_packages(),
      scripts=["pi-manage"] + get_scripts("tools"),
      extras_require={
          'dev': ["Sphinx>=1.3.1",
                  "sphinxcontrib-httpdomain>=1.3.0"],
          'test': ["coverage>=3.7.1",
                   "mock>=1.0.1",
                   "pyparsing>=2.0.3",
                   "nose>=1.3.4",
                   "responses>=0.4.0",
                   "six>=1.8.0"],
      },
      install_requires=install_requires,
      include_package_data=True,
      data_files=[('etc/privacyidea/',
                   ['deploy/apache/privacyideaapp.wsgi',
                    'deploy/privacyidea/dictionary',
                    'deploy/privacyidea/enckey',
                    'deploy/privacyidea/private.pem',
                    'deploy/privacyidea/public.pem']),
                  ('share/man/man1', get_man_pages("tools")),
                  ('lib/privacyidea/authmodules/FreeRADIUS',
                   ["authmodules/FreeRADIUS/LICENSE",
                    "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
                  ('lib/privacyidea/authmodules/OTRS',
                   ["authmodules/OTRS/privacyIDEA.pm"]),
                  ('lib/privacyidea/migrations',
                   ["migrations/alembic.ini",
                    "migrations/env.py",
                    "migrations/README",
                    "migrations/script.py.mako"]),
                  ('lib/privacyidea/migrations/versions',
                   get_file_list("migrations/versions/"))
                  ],
      classifiers=["Framework :: Flask",
                   "License :: OSI Approved :: "
                   "GNU Affero General Public License v3",
                   "Programming Language :: Python",
                   "Development Status :: 5 - Production/Stable",
                   "Topic :: Internet",
                   "Topic :: Security",
                   "Topic :: System ::"
                   " Systems Administration :: Authentication/Directory"
                   ],
      #message_extractors={'privacyidea': [
      #        ('**.py', 'python', None),
      #        ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
      zip_safe=False,
      long_description=get_file_contents('README.rst')
      )
./CrossVul/dataset_final_sorted/CWE-20/py/bad_50_8
crossvul-python_data_good_802_0
# -*- coding: utf-8 -*-

# Copyright 2014-2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import socket
import random
import smtplib
import string
import twisted.python.log
import cgi
import urllib

# email.utils was imported twice in the original; one import suffices.
import email.utils

from sydent.util import time_msec

logger = logging.getLogger(__name__)


def sendEmail(sydent, templateName, mailTo, substitutions):
    """Render the configured template and send it to *mailTo* over SMTP.

    :param sydent: sydent instance; only its ``cfg`` ('email' section) is read
    :param templateName: config key ('email' section) naming the template file
    :param mailTo: recipient address (validated with email.utils.parseaddr)
    :param substitutions: dict of utf-8 string substitutions; every key also
        gets ``_forhtml`` (HTML-escaped) and ``_forurl`` (URL-quoted) variants
    :raises EmailAddressException: if the from/to address fails basic parsing
    :raises EmailSendException: wrapping any send failure (original exception
        is attached as ``.cause``)
    """
    mailFrom = sydent.cfg.get('email', 'email.from')
    mailTemplateFile = sydent.cfg.get('email', templateName)

    myHostname = sydent.cfg.get('email', 'email.hostname')
    if myHostname == '':
        myHostname = socket.getfqdn()
    # Message-ID: <millis + 16 random letters @ hostname>
    midRandom = "".join([random.choice(string.ascii_letters) for _ in range(16)])
    messageid = "<%d%s@%s>" % (time_msec(), midRandom, myHostname)

    allSubstitutions = {}
    allSubstitutions.update(substitutions)
    allSubstitutions.update({
        'messageid': messageid,
        'date': email.utils.formatdate(localtime=False),
        'to': mailTo,
        'from': mailFrom,
    })

    for k, v in allSubstitutions.items():
        allSubstitutions[k] = v.decode('utf8')
        allSubstitutions[k+"_forhtml"] = cgi.escape(v.decode('utf8'))
        allSubstitutions[k+"_forurl"] = urllib.quote(v)

    mailString = open(mailTemplateFile).read().decode('utf8') % allSubstitutions
    parsedFrom = email.utils.parseaddr(mailFrom)[1]
    parsedTo = email.utils.parseaddr(mailTo)[1]
    if parsedFrom == '' or parsedTo == '':
        logger.info("Couldn't parse from / to address %s / %s", mailFrom, mailTo)
        raise EmailAddressException()

    mailServer = sydent.cfg.get('email', 'email.smtphost')
    mailPort = sydent.cfg.get('email', 'email.smtpport')
    mailUsername = sydent.cfg.get('email', 'email.smtpusername')
    mailPassword = sydent.cfg.get('email', 'email.smtppassword')
    mailTLSMode = sydent.cfg.get('email', 'email.tlsmode')
    logger.info("Sending mail to %s with mail server: %s" % (mailTo, mailServer,))
    try:
        if mailTLSMode == 'SSL' or mailTLSMode == 'TLS':
            smtp = smtplib.SMTP_SSL(mailServer, mailPort, myHostname)
        elif mailTLSMode == 'STARTTLS':
            smtp = smtplib.SMTP(mailServer, mailPort, myHostname)
            smtp.starttls()
        else:
            smtp = smtplib.SMTP(mailServer, mailPort, myHostname)
        if mailUsername != '':
            smtp.login(mailUsername, mailPassword)

        # We're using the parsing above to do basic validation, but instead of
        # failing it may munge the address it returns. So we should *not* use
        # that parsed address, as it may not match any validation done
        # elsewhere.
        smtp.sendmail(mailFrom, mailTo, mailString.encode('utf-8'))
        smtp.quit()
    except Exception as origException:
        twisted.python.log.err()
        ese = EmailSendException()
        ese.cause = origException
        raise ese


class EmailAddressException(Exception):
    """Raised when the from/to address fails basic parseaddr validation."""
    pass


class EmailSendException(Exception):
    """Raised when SMTP delivery fails; ``.cause`` holds the original error."""
    pass
./CrossVul/dataset_final_sorted/CWE-20/py/good_802_0
crossvul-python_data_bad_1740_1
"""A contents manager that uses the local file system for storage.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import io import os import shutil import mimetypes import nbformat from tornado import web from .filecheckpoints import FileCheckpoints from .fileio import FileManagerMixin from .manager import ContentsManager from ipython_genutils.importstring import import_item from traitlets import Any, Unicode, Bool, TraitError from ipython_genutils.py3compat import getcwd, string_types from . import tz from notebook.utils import ( is_hidden, to_api_path, ) _script_exporter = None def _post_save_script(model, os_path, contents_manager, **kwargs): """convert notebooks to Python script after save with nbconvert replaces `ipython notebook --script` """ from nbconvert.exporters.script import ScriptExporter if model['type'] != 'notebook': return global _script_exporter if _script_exporter is None: _script_exporter = ScriptExporter(parent=contents_manager) log = contents_manager.log base, ext = os.path.splitext(os_path) py_fname = base + '.py' script, resources = _script_exporter.from_filename(os_path) script_fname = base + resources.get('output_extension', '.txt') log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir)) with io.open(script_fname, 'w', encoding='utf-8') as f: f.write(script) class FileContentsManager(FileManagerMixin, ContentsManager): root_dir = Unicode(config=True) def _root_dir_default(self): try: return self.parent.notebook_dir except AttributeError: return getcwd() save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook') def _save_script_changed(self): self.log.warn(""" `--script` is deprecated. You can trigger nbconvert via pre- or post-save hooks: ContentsManager.pre_save_hook FileContentsManager.post_save_hook A post-save hook has been registered that calls: ipython nbconvert --to script [notebook] which behaves similarly to `--script`. 
""") self.post_save_hook = _post_save_script post_save_hook = Any(None, config=True, help="""Python callable or importstring thereof to be called on the path of a file just saved. This can be used to process the file on disk, such as converting the notebook to a script or HTML via nbconvert. It will be called as (all arguments passed by keyword):: hook(os_path=os_path, model=model, contents_manager=instance) - path: the filesystem path to the file just written - model: the model representing the file - contents_manager: this ContentsManager instance """ ) def _post_save_hook_changed(self, name, old, new): if new and isinstance(new, string_types): self.post_save_hook = import_item(self.post_save_hook) elif new: if not callable(new): raise TraitError("post_save_hook must be callable") def run_post_save_hook(self, model, os_path): """Run the post-save hook if defined, and log errors""" if self.post_save_hook: try: self.log.debug("Running post-save hook on %s", os_path) self.post_save_hook(os_path=os_path, model=model, contents_manager=self) except Exception: self.log.error("Post-save hook failed on %s", os_path, exc_info=True) def _root_dir_changed(self, name, old, new): """Do a bit of validation of the root_dir.""" if not os.path.isabs(new): # If we receive a non-absolute path, make it absolute. self.root_dir = os.path.abspath(new) return if not os.path.isdir(new): raise TraitError("%r is not a directory" % new) def _checkpoints_class_default(self): return FileCheckpoints def is_hidden(self, path): """Does the API style path correspond to a hidden directory or file? Parameters ---------- path : string The path to check. This is an API path (`/` separated, relative to root_dir). Returns ------- hidden : bool Whether the path exists and is hidden. """ path = path.strip('/') os_path = self._get_os_path(path=path) return is_hidden(os_path, self.root_dir) def file_exists(self, path): """Returns True if the file exists, else returns False. 
API-style wrapper for os.path.isfile Parameters ---------- path : string The relative path to the file (with '/' as separator) Returns ------- exists : bool Whether the file exists. """ path = path.strip('/') os_path = self._get_os_path(path) return os.path.isfile(os_path) def dir_exists(self, path): """Does the API-style path refer to an extant directory? API-style wrapper for os.path.isdir Parameters ---------- path : string The path to check. This is an API path (`/` separated, relative to root_dir). Returns ------- exists : bool Whether the path is indeed a directory. """ path = path.strip('/') os_path = self._get_os_path(path=path) return os.path.isdir(os_path) def exists(self, path): """Returns True if the path exists, else returns False. API-style wrapper for os.path.exists Parameters ---------- path : string The API path to the file (with '/' as separator) Returns ------- exists : bool Whether the target exists. """ path = path.strip('/') os_path = self._get_os_path(path=path) return os.path.exists(os_path) def _base_model(self, path): """Build the common base of a contents model""" os_path = self._get_os_path(path) info = os.stat(os_path) last_modified = tz.utcfromtimestamp(info.st_mtime) created = tz.utcfromtimestamp(info.st_ctime) # Create the base model. 
model = {} model['name'] = path.rsplit('/', 1)[-1] model['path'] = path model['last_modified'] = last_modified model['created'] = created model['content'] = None model['format'] = None model['mimetype'] = None try: model['writable'] = os.access(os_path, os.W_OK) except OSError: self.log.error("Failed to check write permissions on %s", os_path) model['writable'] = False return model def _dir_model(self, path, content=True): """Build a model for a directory if content is requested, will include a listing of the directory """ os_path = self._get_os_path(path) four_o_four = u'directory does not exist: %r' % path if not os.path.isdir(os_path): raise web.HTTPError(404, four_o_four) elif is_hidden(os_path, self.root_dir): self.log.info("Refusing to serve hidden directory %r, via 404 Error", os_path ) raise web.HTTPError(404, four_o_four) model = self._base_model(path) model['type'] = 'directory' if content: model['content'] = contents = [] os_dir = self._get_os_path(path) for name in os.listdir(os_dir): os_path = os.path.join(os_dir, name) # skip over broken symlinks in listing if not os.path.exists(os_path): self.log.warn("%s doesn't exist", os_path) continue elif not os.path.isfile(os_path) and not os.path.isdir(os_path): self.log.debug("%s not a regular file", os_path) continue if self.should_list(name) and not is_hidden(os_path, self.root_dir): contents.append(self.get( path='%s/%s' % (path, name), content=False) ) model['format'] = 'json' return model def _file_model(self, path, content=True, format=None): """Build a model for a file if content is requested, include the file contents. format: If 'text', the contents will be decoded as UTF-8. If 'base64', the raw bytes contents will be encoded as base64. 
If not specified, try to decode as UTF-8, and fall back to base64 """ model = self._base_model(path) model['type'] = 'file' os_path = self._get_os_path(path) if content: content, format = self._read_file(os_path, format) default_mime = { 'text': 'text/plain', 'base64': 'application/octet-stream' }[format] model.update( content=content, format=format, mimetype=mimetypes.guess_type(os_path)[0] or default_mime, ) return model def _notebook_model(self, path, content=True): """Build a notebook model if content is requested, the notebook content will be populated as a JSON structure (not double-serialized) """ model = self._base_model(path) model['type'] = 'notebook' if content: os_path = self._get_os_path(path) nb = self._read_notebook(os_path, as_version=4) self.mark_trusted_cells(nb, path) model['content'] = nb model['format'] = 'json' self.validate_notebook_model(model) return model def get(self, path, content=True, type=None, format=None): """ Takes a path for an entity and returns its model Parameters ---------- path : str the API path that describes the relative path for the target content : bool Whether to include the contents in the reply type : str, optional The requested type - 'file', 'notebook', or 'directory'. Will raise HTTPError 400 if the content doesn't match. format : str, optional The requested format for file contents. 'text' or 'base64'. Ignored if this returns a notebook or directory model. Returns ------- model : dict the contents model. If content=True, returns the contents of the file or directory as well. 
""" path = path.strip('/') if not self.exists(path): raise web.HTTPError(404, u'No such file or directory: %s' % path) os_path = self._get_os_path(path) if os.path.isdir(os_path): if type not in (None, 'directory'): raise web.HTTPError(400, u'%s is a directory, not a %s' % (path, type), reason='bad type') model = self._dir_model(path, content=content) elif type == 'notebook' or (type is None and path.endswith('.ipynb')): model = self._notebook_model(path, content=content) else: if type == 'directory': raise web.HTTPError(400, u'%s is not a directory' % path, reason='bad type') model = self._file_model(path, content=content, format=format) return model def _save_directory(self, os_path, model, path=''): """create a directory""" if is_hidden(os_path, self.root_dir): raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path) if not os.path.exists(os_path): with self.perm_to_403(): os.mkdir(os_path) elif not os.path.isdir(os_path): raise web.HTTPError(400, u'Not a directory: %s' % (os_path)) else: self.log.debug("Directory %r already exists", os_path) def save(self, model, path=''): """Save the file model and return the model with no content.""" path = path.strip('/') if 'type' not in model: raise web.HTTPError(400, u'No file type provided') if 'content' not in model and model['type'] != 'directory': raise web.HTTPError(400, u'No file content provided') os_path = self._get_os_path(path) self.log.debug("Saving %s", os_path) self.run_pre_save_hook(model=model, path=path) try: if model['type'] == 'notebook': nb = nbformat.from_dict(model['content']) self.check_and_sign(nb, path) self._save_notebook(os_path, nb) # One checkpoint should always exist for notebooks. if not self.checkpoints.list_checkpoints(path): self.create_checkpoint(path) elif model['type'] == 'file': # Missing format will be handled internally by _save_file. 
self._save_file(os_path, model['content'], model.get('format')) elif model['type'] == 'directory': self._save_directory(os_path, model, path) else: raise web.HTTPError(400, "Unhandled contents type: %s" % model['type']) except web.HTTPError: raise except Exception as e: self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True) raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e)) validation_message = None if model['type'] == 'notebook': self.validate_notebook_model(model) validation_message = model.get('message', None) model = self.get(path, content=False) if validation_message: model['message'] = validation_message self.run_post_save_hook(model=model, os_path=os_path) return model def delete_file(self, path): """Delete file at path.""" path = path.strip('/') os_path = self._get_os_path(path) rm = os.unlink if os.path.isdir(os_path): listing = os.listdir(os_path) # Don't delete non-empty directories. # A directory containing only leftover checkpoints is # considered empty. cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None) for entry in listing: if entry != cp_dir: raise web.HTTPError(400, u'Directory %s not empty' % os_path) elif not os.path.isfile(os_path): raise web.HTTPError(404, u'File does not exist: %s' % os_path) if os.path.isdir(os_path): self.log.debug("Removing directory %s", os_path) with self.perm_to_403(): shutil.rmtree(os_path) else: self.log.debug("Unlinking file %s", os_path) with self.perm_to_403(): rm(os_path) def rename_file(self, old_path, new_path): """Rename a file.""" old_path = old_path.strip('/') new_path = new_path.strip('/') if new_path == old_path: return new_os_path = self._get_os_path(new_path) old_os_path = self._get_os_path(old_path) # Should we proceed with the move? 
if os.path.exists(new_os_path): raise web.HTTPError(409, u'File already exists: %s' % new_path) # Move the file try: with self.perm_to_403(): shutil.move(old_os_path, new_os_path) except web.HTTPError: raise except Exception as e: raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e)) def info_string(self): return "Serving notebooks from local directory: %s" % self.root_dir def get_kernel_path(self, path, model=None): """Return the initial API path of a kernel associated with a given notebook""" if '/' in path: parent_dir = path.rsplit('/', 1)[0] else: parent_dir = '' return parent_dir
./CrossVul/dataset_final_sorted/CWE-20/py/bad_1740_1
crossvul-python_data_good_3500_3
import Bcfg2.Server.Plugin
import Bcfg2.Options
import lxml.etree
import posixpath
import tempfile
import pipes
import os
from subprocess import Popen, PIPE, STDOUT
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser


class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
    """
    The SSLCA generator handles the creation and management of ssl
    certificates and their keys.
    """
    name = 'SSLCA'
    __version__ = '$Id:$'
    __author__ = 'g.hagger@gmail.com'
    __child__ = Bcfg2.Server.Plugin.FileBacked
    # NOTE(review): these three dicts are class-level and therefore shared by
    # every SSLCA instance.  Harmless with a single plugin instance, which is
    # presumably the case here -- confirm before instantiating more than one.
    key_specs = {}
    cert_specs = {}
    CAs = {}

    def HandleEvent(self, event=None):
        """
        Updates which files this plugin handles based upon filesystem events.
        Allows configuration items to be added/removed without server restarts.

        ``key.xml``/``cert.xml`` files (re)populate ``key_specs`` /
        ``cert_specs`` and bind the corresponding generator method; any other
        file is tracked as a plain ``FileBacked`` entry.
        """
        action = event.code2str()
        if event.filename[0] == '/':
            return
        epath = "".join([self.data, self.handles[event.requestID],
                         event.filename])
        if posixpath.isdir(epath):
            ident = self.handles[event.requestID] + event.filename
        else:
            ident = self.handles[event.requestID][:-1]

        fname = "".join([ident, '/', event.filename])

        if event.filename.endswith('.xml'):
            if action in ['exists', 'created', 'changed']:
                if event.filename.endswith('key.xml'):
                    key_spec = dict(list(lxml.etree.parse(epath).find('Key').items()))
                    self.key_specs[ident] = {
                        'bits': key_spec.get('bits', 2048),
                        'type': key_spec.get('type', 'rsa')
                    }
                    self.Entries['Path'][ident] = self.get_key
                elif event.filename.endswith('cert.xml'):
                    cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items()))
                    ca = cert_spec.get('ca', 'default')
                    self.cert_specs[ident] = {
                        'ca': ca,
                        'format': cert_spec.get('format', 'pem'),
                        'key': cert_spec.get('key'),
                        'days': cert_spec.get('days', 365),
                        'C': cert_spec.get('c'),
                        'L': cert_spec.get('l'),
                        'ST': cert_spec.get('st'),
                        'OU': cert_spec.get('ou'),
                        'O': cert_spec.get('o'),
                        'emailAddress': cert_spec.get('emailaddress')
                    }
                    cp = ConfigParser()
                    cp.read(self.core.cfile)
                    # per-CA settings live in an [sslca_<name>] section of
                    # the main bcfg2 config file
                    self.CAs[ca] = dict(cp.items('sslca_' + ca))
                    self.Entries['Path'][ident] = self.get_cert
            if action == 'deleted':
                if ident in self.Entries['Path']:
                    del self.Entries['Path'][ident]
        else:
            if action in ['exists', 'created']:
                if posixpath.isdir(epath):
                    self.AddDirectoryMonitor(epath[len(self.data):])
                if ident not in self.entries and posixpath.isfile(epath):
                    self.entries[fname] = self.__child__(epath)
                    self.entries[fname].HandleEvent(event)
            if action == 'changed':
                self.entries[fname].HandleEvent(event)
            elif action == 'deleted':
                if fname in self.entries:
                    del self.entries[fname]
                else:
                    self.entries[fname].HandleEvent(event)

    def get_key(self, entry, metadata):
        """
        either grabs a prexisting key hostfile, or triggers the generation
        of a new key if one doesn't exist.
        """
        # set path type and permissions, otherwise bcfg2 won't bind the file
        permdata = {'owner': 'root',
                    'group': 'root',
                    'type': 'file',
                    'perms': '644'}
        [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]

        # check if we already have a hostfile, or need to generate a new key
        # TODO: verify key fits the specs
        path = entry.get('name')
        filename = "".join([path, '/', path.rsplit('/', 1)[1],
                            '.H_', metadata.hostname])
        if filename not in list(self.entries.keys()):
            key = self.build_key(filename, entry, metadata)
            open(self.data + filename, 'w').write(key)
            entry.text = key
            self.entries[filename] = self.__child__("%s%s" % (self.data,
                                                              filename))
            self.entries[filename].HandleEvent()
        else:
            entry.text = self.entries[filename].data

    def build_key(self, filename, entry, metadata):
        """
        generates a new key according the the specification
        """
        type = self.key_specs[entry.get('name')]['type']
        bits = self.key_specs[entry.get('name')]['bits']
        # BUG FIX: 'bits' defaults to the *integer* 2048 (see HandleEvent);
        # Popen argv entries must be strings, so coerce explicitly.
        if type == 'rsa':
            cmd = ["openssl", "genrsa", str(bits)]
        elif type == 'dsa':
            cmd = ["openssl", "dsaparam", "-noout", "-genkey", str(bits)]
        key = Popen(cmd, stdout=PIPE).stdout.read()
        return key

    def get_cert(self, entry, metadata):
        """
        either grabs a prexisting cert hostfile, or triggers the generation
        of a new cert if one doesn't exist.
        """
        # set path type and permissions, otherwise bcfg2 won't bind the file
        permdata = {'owner': 'root',
                    'group': 'root',
                    'type': 'file',
                    'perms': '644'}
        [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]

        path = entry.get('name')
        filename = "".join([path, '/', path.rsplit('/', 1)[1],
                            '.H_', metadata.hostname])

        # first - ensure we have a key to work with
        key = self.cert_specs[entry.get('name')].get('key')
        key_filename = "".join([key, '/', key.rsplit('/', 1)[1],
                                '.H_', metadata.hostname])
        if key_filename not in self.entries:
            # binding the key Path entry forces get_key() to run and
            # populate self.entries with the host's key
            e = lxml.etree.Element('Path')
            e.attrib['name'] = key
            self.core.Bind(e, metadata)

        # check if we have a valid hostfile
        if filename in list(self.entries.keys()) and self.verify_cert(filename,
                                                                      key_filename,
                                                                      entry):
            entry.text = self.entries[filename].data
        else:
            cert = self.build_cert(key_filename, entry, metadata)
            open(self.data + filename, 'w').write(cert)
            self.entries[filename] = self.__child__("%s%s" % (self.data,
                                                              filename))
            self.entries[filename].HandleEvent()
            entry.text = cert

    def verify_cert(self, filename, key_filename, entry):
        """Return True when the cert chains to the CA and matches its key."""
        if self.verify_cert_against_ca(filename, entry):
            if self.verify_cert_against_key(filename, key_filename):
                return True
        return False

    def verify_cert_against_ca(self, filename, entry):
        """
        check that a certificate validates against the ca cert,
        and that it has not expired.
        """
        # NOTE(review): 'chaincert' may be absent from the CA config, in
        # which case this passes None to openssl and fails -- confirm the
        # config always provides it.
        chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
        cert = self.data + filename
        res = Popen(["openssl", "verify", "-CAfile", chaincert, cert],
                    stdout=PIPE, stderr=STDOUT).stdout.read()
        if res == cert + ": OK\n":
            return True
        return False

    def verify_cert_against_key(self, filename, key_filename):
        """
        check that a certificate validates against its private key.
        """
        cert = self.data + filename
        key = self.data + key_filename
        # compare RSA modulus digests of cert and key; pipes.quote guards
        # against shell metacharacters in the paths
        cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" %
               pipes.quote(cert))
        cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
        cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" %
               pipes.quote(key))
        key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
        if cert_md5 == key_md5:
            return True
        return False

    def build_cert(self, key_filename, entry, metadata):
        """
        creates a new certificate according to the specification
        """
        req_config = self.build_req_config(entry, metadata)
        req = self.build_request(key_filename, req_config, entry)
        ca = self.cert_specs[entry.get('name')]['ca']
        ca_config = self.CAs[ca]['config']
        days = self.cert_specs[entry.get('name')]['days']
        passphrase = self.CAs[ca].get('passphrase')
        # BUG FIX: 'days' defaults to the *integer* 365; Popen argv entries
        # must be strings.
        cmd = ["openssl", "ca", "-config", ca_config, "-in", req,
               "-days", str(days), "-batch"]
        if passphrase:
            cmd.extend(["-passin", "pass:%s" % passphrase])
        cert = Popen(cmd, stdout=PIPE).stdout.read()
        try:
            os.unlink(req_config)
            os.unlink(req)
        except OSError:
            self.logger.error("Failed to unlink temporary files")
        return cert

    def build_req_config(self, entry, metadata):
        """
        generates a temporary openssl configuration file that is
        used to generate the required certificate request
        """
        # create temp request config file
        conffile = open(tempfile.mkstemp()[1], 'w')
        cp = ConfigParser({})
        cp.optionxform = str
        defaults = {
            'req': {
                'default_md': 'sha1',
                'distinguished_name': 'req_distinguished_name',
                'req_extensions': 'v3_req',
                'x509_extensions': 'v3_req',
                'prompt': 'no'
            },
            'req_distinguished_name': {},
            'v3_req': {
                'subjectAltName': '@alt_names'
            },
            'alt_names': {}
        }
        for section in list(defaults.keys()):
            cp.add_section(section)
            for key in defaults[section]:
                cp.set(section, key, defaults[section][key])
        # every alias plus the hostname itself becomes a DNS subjectAltName
        x = 1
        altnames = list(metadata.aliases)
        altnames.append(metadata.hostname)
        for altname in altnames:
            cp.set('alt_names', 'DNS.' + str(x), altname)
            x += 1
        for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
            if self.cert_specs[entry.get('name')][item]:
                cp.set('req_distinguished_name', item,
                       self.cert_specs[entry.get('name')][item])
        cp.set('req_distinguished_name', 'CN', metadata.hostname)
        cp.write(conffile)
        conffile.close()
        return conffile.name

    def build_request(self, key_filename, req_config, entry):
        """
        creates the certificate request
        """
        req = tempfile.mkstemp()[1]
        days = self.cert_specs[entry.get('name')]['days']
        key = self.data + key_filename
        # BUG FIX (two issues): this call previously used shell=True with an
        # argv *list*; on POSIX that executes only "openssl" and hands the
        # remaining list items to the shell itself, so the request was never
        # generated as intended.  Run the argv list directly (shell=False).
        # Also coerce the possibly-integer 'days' default to str for argv.
        cmd = ["openssl", "req", "-new", "-config", req_config,
               "-days", str(days), "-key", key, "-text", "-out", req]
        res = Popen(cmd, stdout=PIPE).stdout.read()
        return req
./CrossVul/dataset_final_sorted/CWE-20/py/good_3500_3
crossvul-python_data_good_3499_1
"""This module implements a config file repository."""
__revision__ = '$Revision$'

import binascii
import logging
import lxml
import os
import re
import tempfile
from subprocess import Popen, PIPE

import Bcfg2.Server.Plugin

logger = logging.getLogger('Bcfg2.Plugins.Cfg')


def process_delta(data, delta):
    """Apply a .cat or .diff delta entry to ``data`` and return the result.

    ``delta.specific.delta`` selects the mechanism:
      * 'cat'  -- lines prefixed '+' are appended, '-' lines are removed
      * 'diff' -- ``data`` is written to a temp file and ``patch -u -f``
                  applies ``delta.data`` to it

    Raises Bcfg2.Server.Plugin.PluginExecutionError when patch fails.
    """
    if not delta.specific.delta:
        return data
    if delta.specific.delta == 'cat':
        datalines = data.split('\n')
        for line in delta.data.split('\n'):
            if not line:
                continue
            if line[0] == '+':
                datalines.append(line[1:])
            elif line[0] == '-':
                # only the first matching occurrence is removed
                if line[1:] in datalines:
                    datalines.remove(line[1:])
        return "\n".join(datalines)
    elif delta.specific.delta == 'diff':
        basehandle, basename = tempfile.mkstemp()
        basefile = open(basename, 'w')
        basefile.write(data)
        basefile.close()
        os.close(basehandle)
        cmd = ["patch", "-u", "-f", basefile.name]
        patch = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stderr = patch.communicate(input=delta.data)[1]
        ret = patch.wait()
        output = open(basefile.name, 'r').read()
        os.unlink(basefile.name)
        # BUG FIX: Popen.wait() returns the exit code directly (unlike the
        # os.wait() encoding), so the previous "ret >> 8 != 0" test shifted
        # patch's real failure code (1) down to 0 and silently ignored every
        # failed patch.
        if ret != 0:
            logger.error("Error applying diff %s: %s" % (delta.name, stderr))
            raise Bcfg2.Server.Plugin.PluginExecutionError('delta', delta)
        return output


class CfgMatcher:
    """Classifies filenames in a Cfg directory as base files or deltas."""

    def __init__(self, fname):
        name = re.escape(fname)
        # base files: name, name.H_<host>, or name.G<prio>_<group>
        self.basefile_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|.G(?P<prio>\d+)_(?P<group>\S+))$' % name)
        # deltas: same specificity suffixes plus a trailing .cat or .diff
        self.delta_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|\\.G(?P<prio>\d+)_(?P<group>\S+))\\.(?P<delta>(cat|diff))$' % name)
        self.cat_count = fname.count(".cat")
        self.diff_count = fname.count(".diff")

    def match(self, fname):
        """Return a delta or basefile regex match for ``fname``, or None."""
        # an extra .cat/.diff suffix beyond the base name marks a delta
        if fname.count(".cat") > self.cat_count \
               or fname.count('.diff') > self.diff_count:
            return self.delta_reg.match(fname)
        return self.basefile_reg.match(fname)


class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
    """An EntrySet that understands Cfg base files and cat/diff deltas."""

    def __init__(self, basename, path, entry_type, encoding):
        Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path,
                                              entry_type, encoding)
        self.specific = CfgMatcher(path.split('/')[-1])

    def sort_by_specific(self, one, other):
        # Python 2 cmp-style comparator; Specificity defines rich ordering
        return cmp(one.specific, other.specific)

    def get_pertinent_entries(self, metadata):
        '''return a list of all entries pertinent
        to a client => [base, delta1, delta2]'''
        matching = [ent for ent in self.entries.values() if \
                    ent.specific.matches(metadata)]
        matching.sort(self.sort_by_specific)
        non_delta = [matching.index(m) for m in matching
                     if not m.specific.delta]
        if not non_delta:
            # no base file applies to this client at all
            raise Bcfg2.Server.Plugin.PluginExecutionError
        base = min(non_delta)
        used = matching[:base + 1]
        # most specific base first, then its deltas
        used.reverse()
        return used

    def bind_entry(self, entry, metadata):
        """Fill in ``entry`` with file data (base + deltas) for ``metadata``."""
        self.bind_info_to_entry(entry, metadata)
        used = self.get_pertinent_entries(metadata)
        basefile = used.pop(0)
        data = basefile.data
        if entry.tag == 'Path':
            entry.set('type', 'file')
        for delta in used:
            data = data.strip()
            data = process_delta(data, delta)
        if used:
            data += '\n'
        if entry.get('encoding') == 'base64':
            entry.text = binascii.b2a_base64(data)
        else:
            entry.text = unicode(data, self.encoding)
        if entry.text in ['', None]:
            entry.set('empty', 'true')

    def list_accept_choices(self, metadata):
        '''return a list of candidate pull locations'''
        used = self.get_pertinent_entries(metadata)
        ret = []
        if used:
            ret.append(used[0].specific)
        if not ret[0].hostname:
            # offer a host-specific target in addition to the group/all file
            ret.append(Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname))
        return ret

    def build_filename(self, specific):
        """Return the on-disk filename for a given Specificity."""
        bfname = self.path + '/' + self.path.split('/')[-1]
        if specific.all:
            return bfname
        elif specific.group:
            return "%s.G%d_%s" % (bfname, specific.prio, specific.group)
        elif specific.hostname:
            return "%s.H_%s" % (bfname, specific.hostname)

    def write_update(self, specific, new_entry, log):
        """Write pulled file content and/or metadata back to the repository."""
        if 'text' in new_entry:
            name = self.build_filename(specific)
            open(name, 'w').write(new_entry['text'])
            if log:
                logger.info("Wrote file %s" % name)
        badattr = [attr for attr in ['owner', 'group', 'perms']
                   if attr in new_entry]
        if badattr:
            metadata_updates = {}
            metadata_updates.update(self.metadata)
            for attr in badattr:
                metadata_updates[attr] = new_entry.get(attr)
            if self.infoxml:
                infoxml = lxml.etree.Element('FileInfo')
                infotag = lxml.etree.SubElement(infoxml, 'Info')
                [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
                 for attr in metadata_updates]
                ofile = open(self.path + "/info.xml", "w")
                ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
                ofile.close()
                if log:
                    logger.info("Wrote file %s" % (self.path + "/info.xml"))
            else:
                infofile = open(self.path + '/:info', 'w')
                for x in metadata_updates.iteritems():
                    infofile.write("%s: %s\n" % x)
                infofile.close()
                if log:
                    logger.info("Wrote file %s" % infofile.name)


class Cfg(Bcfg2.Server.Plugin.GroupSpool,
          Bcfg2.Server.Plugin.PullTarget):
    """This generator in the configuration file repository for Bcfg2."""
    name = 'Cfg'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'

    es_cls = CfgEntrySet
    es_child_cls = Bcfg2.Server.Plugin.SpecificData

    def AcceptChoices(self, entry, metadata):
        return self.entries[entry.get('name')].list_accept_choices(metadata)

    def AcceptPullData(self, specific, new_entry, log):
        return self.entries[new_entry.get('name')].write_update(specific,
                                                                new_entry,
                                                                log)
./CrossVul/dataset_final_sorted/CWE-20/py/good_3499_1
crossvul-python_data_good_3499_2
import os
from mercurial import ui, hg
import Bcfg2.Server.Plugin

# for debugging output only
import logging
logger = logging.getLogger('Bcfg2.Plugins.Mercurial')


class Hg(Bcfg2.Server.Plugin.Plugin,
         Bcfg2.Server.Plugin.Version):
    """Mercurial is a version plugin for dealing with Bcfg2 repository."""
    name = 'Mercurial'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'
    experimental = True

    def __init__(self, core, datastore):
        """Initialize the plugin and verify the repo is under hg control.

        Raises Bcfg2.Server.Plugin.PluginInitError when ``datastore`` has
        no .hg directory or its changelog cannot be read.
        """
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Version.__init__(self)
        self.core = core
        self.datastore = datastore

        # path to hg directory for Bcfg2 repo
        hg_dir = "%s/.hg" % datastore

        # Read changeset from bcfg2 repo
        if os.path.isdir(hg_dir):
            self.get_revision()
        else:
            logger.error("%s is not present." % hg_dir)
            raise Bcfg2.Server.Plugin.PluginInitError

        logger.debug("Initialized hg plugin with hg directory = %s" % hg_dir)

    def get_revision(self):
        """Read hg revision information for the Bcfg2 repository."""
        try:
            repo_path = "%s/" % self.datastore
            repo = hg.repository(ui.ui(), repo_path)
            tip = repo.changelog.tip()
            revision = repo.changelog.rev(tip)
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception so only real repo-access
        # errors are converted into PluginInitError.
        except Exception:
            logger.error("Failed to read hg repository; disabling mercurial support")
            raise Bcfg2.Server.Plugin.PluginInitError
        return revision
./CrossVul/dataset_final_sorted/CWE-20/py/good_3499_2
crossvul-python_data_good_117_3
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import abc

import six

from cryptography import utils


# --- Abstract interfaces ----------------------------------------------------
# Each concrete mode class below is registered against one or more of these
# via utils.register_interface, rather than inheriting from them directly.

@six.add_metaclass(abc.ABCMeta)
class Mode(object):
    @abc.abstractproperty
    def name(self):
        """
        A string naming this mode (e.g. "ECB", "CBC").
        """

    @abc.abstractmethod
    def validate_for_algorithm(self, algorithm):
        """
        Checks that all the necessary invariants of this (mode, algorithm)
        combination are met.
        """


@six.add_metaclass(abc.ABCMeta)
class ModeWithInitializationVector(object):
    @abc.abstractproperty
    def initialization_vector(self):
        """
        The value of the initialization vector for this mode as bytes.
        """


@six.add_metaclass(abc.ABCMeta)
class ModeWithTweak(object):
    @abc.abstractproperty
    def tweak(self):
        """
        The value of the tweak for this mode as bytes.
        """


@six.add_metaclass(abc.ABCMeta)
class ModeWithNonce(object):
    @abc.abstractproperty
    def nonce(self):
        """
        The value of the nonce for this mode as bytes.
        """


@six.add_metaclass(abc.ABCMeta)
class ModeWithAuthenticationTag(object):
    @abc.abstractproperty
    def tag(self):
        """
        The value of the tag supplied to the constructor of this mode.
        """


# --- Shared validators ------------------------------------------------------
# These module-level helpers deliberately take ``self`` as their first
# parameter: the concrete classes below install them directly as methods
# (e.g. ``validate_for_algorithm = _check_iv_and_key_length``), so at call
# time ``self`` is the mode instance.

def _check_aes_key_length(self, algorithm):
    # Only applies to AES; other algorithms pass through unchecked here.
    if algorithm.key_size > 256 and algorithm.name == "AES":
        raise ValueError(
            "Only 128, 192, and 256 bit keys are allowed for this AES mode"
        )


def _check_iv_length(self, algorithm):
    # The IV must be exactly one cipher block long (block_size is in bits).
    if len(self.initialization_vector) * 8 != algorithm.block_size:
        raise ValueError("Invalid IV size ({0}) for {1}.".format(
            len(self.initialization_vector), self.name
        ))


def _check_iv_and_key_length(self, algorithm):
    _check_aes_key_length(self, algorithm)
    _check_iv_length(self, algorithm)


# --- Concrete modes ---------------------------------------------------------

@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CBC(object):
    name = "CBC"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_and_key_length


@utils.register_interface(Mode)
@utils.register_interface(ModeWithTweak)
class XTS(object):
    name = "XTS"

    def __init__(self, tweak):
        if not isinstance(tweak, bytes):
            raise TypeError("tweak must be bytes")

        if len(tweak) != 16:
            raise ValueError("tweak must be 128-bits (16 bytes)")

        self._tweak = tweak

    tweak = utils.read_only_property("_tweak")

    def validate_for_algorithm(self, algorithm):
        # XTS keys are double-length: two AES keys concatenated.
        if algorithm.key_size not in (256, 512):
            raise ValueError(
                "The XTS specification requires a 256-bit key for AES-128-XTS"
                " and 512-bit key for AES-256-XTS"
            )


@utils.register_interface(Mode)
class ECB(object):
    name = "ECB"

    validate_for_algorithm = _check_aes_key_length


@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class OFB(object):
    name = "OFB"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_and_key_length


@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CFB(object):
    name = "CFB"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_and_key_length


@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CFB8(object):
    name = "CFB8"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_and_key_length


@utils.register_interface(Mode)
@utils.register_interface(ModeWithNonce)
class CTR(object):
    name = "CTR"

    def __init__(self, nonce):
        if not isinstance(nonce, bytes):
            raise TypeError("nonce must be bytes")
        self._nonce = nonce

    nonce = utils.read_only_property("_nonce")

    def validate_for_algorithm(self, algorithm):
        _check_aes_key_length(self, algorithm)
        # CTR's "nonce" doubles as the initial counter block, so like an IV
        # it must be exactly one cipher block long.
        if len(self.nonce) * 8 != algorithm.block_size:
            raise ValueError("Invalid nonce size ({0}) for {1}.".format(
                len(self.nonce), self.name
            ))


@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
@utils.register_interface(ModeWithAuthenticationTag)
class GCM(object):
    name = "GCM"
    # Upper bounds on plaintext and AAD sizes per GCM operation, in bytes;
    # presumably taken from the GCM specification's 2**39 - 256 bit
    # plaintext and 2**64 bit AAD limits -- confirm against NIST SP 800-38D.
    _MAX_ENCRYPTED_BYTES = (2 ** 39 - 256) // 8
    _MAX_AAD_BYTES = (2 ** 64) // 8

    def __init__(self, initialization_vector, tag=None, min_tag_length=16):
        # len(initialization_vector) must in [1, 2 ** 64), but it's impossible
        # to actually construct a bytes object that large, so we don't check
        # for it
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector
        if tag is not None:
            if not isinstance(tag, bytes):
                raise TypeError("tag must be bytes or None")
            # a caller may accept truncated tags, but never shorter than
            # 4 bytes; the tag length is only enforced when a tag is given
            # (i.e. decryption)
            if min_tag_length < 4:
                raise ValueError("min_tag_length must be >= 4")
            if len(tag) < min_tag_length:
                raise ValueError(
                    "Authentication tag must be {0} bytes or longer.".format(
                        min_tag_length)
                )
        self._tag = tag
        self._min_tag_length = min_tag_length

    tag = utils.read_only_property("_tag")
    initialization_vector = utils.read_only_property("_initialization_vector")

    def validate_for_algorithm(self, algorithm):
        # GCM accepts any IV length, so only the key length is checked here.
        _check_aes_key_length(self, algorithm)
./CrossVul/dataset_final_sorted/CWE-20/py/good_117_3
crossvul-python_data_good_3768_3
import datetime import os import re import time from pprint import pformat from urllib import urlencode, quote from urlparse import urljoin, urlparse try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: # The mod_python version is more efficient, so try importing it first. from mod_python.util import parse_qsl except ImportError: try: # Python 2.6 and greater from urlparse import parse_qsl except ImportError: # Python 2.5, 2.4. Works on Python 2.6 but raises # PendingDeprecationWarning from cgi import parse_qsl import Cookie # httponly support exists in Python 2.6's Cookie library, # but not in Python 2.4 or 2.5. _morsel_supports_httponly = Cookie.Morsel._reserved.has_key('httponly') # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"') # See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256 _tc = Cookie.SimpleCookie() _tc.load('f:oo') _cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output() if _morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names: SimpleCookie = Cookie.SimpleCookie else: if not _morsel_supports_httponly: class Morsel(Cookie.Morsel): def __setitem__(self, K, V): K = K.lower() if K == "httponly": if V: # The superclass rejects httponly as a key, # so we jump to the grandparent. 
super(Cookie.Morsel, self).__setitem__(K, V) else: super(Morsel, self).__setitem__(K, V) def OutputString(self, attrs=None): output = super(Morsel, self).OutputString(attrs) if "httponly" in self: output += "; httponly" return output class SimpleCookie(Cookie.SimpleCookie): if not _morsel_supports_httponly: def __set(self, key, real_value, coded_value): M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) def __setitem__(self, key, value): rval, cval = self.value_encode(value) self.__set(key, rval, cval) if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",","\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. 
if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded if not _cookie_allows_colon_in_names: def load(self, rawdata, ignore_parse_errors=False): if ignore_parse_errors: self.bad_cookies = set() self._BaseCookie__set = self._loose_set super(SimpleCookie, self).load(rawdata) if ignore_parse_errors: self._BaseCookie__set = self._strict_set for key in self.bad_cookies: del self[key] _strict_set = Cookie.BaseCookie._BaseCookie__set def _loose_set(self, key, real_value, coded_value): try: self._strict_set(key, real_value, coded_value) except Cookie.CookieError: self.bad_cookies.add(key) dict.__setitem__(self, key, Cookie.Morsel()) class CompatCookie(SimpleCookie): def __init__(self, *args, **kwargs): super(CompatCookie, self).__init__(*args, **kwargs) import warnings warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.", PendingDeprecationWarning) from django.core.exceptions import SuspiciousOperation from django.utils.datastructures import MultiValueDict, ImmutableList from django.utils.encoding import smart_str, iri_to_uri, force_unicode from django.utils.http import cookie_date from django.http.multipartparser import MultiPartParser from django.conf import settings from django.core.files import uploadhandler from utils import * RESERVED_CHARS="!*'();:@&=+$,/?%#[]" absolute_http_url_re = re.compile(r"^https?://", re.I) class Http404(Exception): pass class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. 
_encoding = None _upload_handlers = [] def __init__(self): self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {} self.path = '' self.path_info = '' self.method = None def __repr__(self): return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \ (pformat(self.GET), pformat(self.POST), pformat(self.COOKIES), pformat(self.META)) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != (self.is_secure() and '443' or '80'): host = '%s:%s' % (host, server_port) # Disallow potentially poisoned hostnames. if set(';/?@&=+$,').intersection(host): raise SuspiciousOperation('Invalid HTTP_HOST header: %s' % host) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '') def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. 
""" if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def is_secure(self): return os.environ.get("HTTPS") == "on" def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' def _set_encoding(self, val): """ Sets the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _get_encoding(self): return self._encoding encoding = property(_get_encoding, _set_encoding) def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] def _set_upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def _get_upload_handlers(self): if not self._upload_handlers: # If thre are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers upload_handlers = property(_get_upload_handlers, _set_upload_handlers) def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning = "You cannot alter upload handlers after the upload has been processed." 
) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() def _get_raw_post_data(self): if not hasattr(self, '_raw_post_data'): if self._read_started: raise Exception("You cannot access raw_post_data after reading from request's data stream") try: content_length = int(self.META.get('CONTENT_LENGTH', 0)) except (ValueError, TypeError): # If CONTENT_LENGTH was empty string or not an integer, don't # error out. We've also seen None passed in here (against all # specs, but see ticket #8259), so we handle TypeError as well. content_length = 0 if content_length: self._raw_post_data = self.read(content_length) else: self._raw_post_data = self.read() self._stream = StringIO(self._raw_post_data) return self._raw_post_data raw_post_data = property(_get_raw_post_data) def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): # Populates self._post and self._files if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_raw_post_data'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart'): if hasattr(self, '_raw_post_data'): # Use already read data data = StringIO(self._raw_post_data) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except: # An error occured while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occured. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise else: self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict() ## File-like and iterator interface. 
## ## Expects self._stream to be set to an appropriate source of bytes by ## a corresponding request subclass (WSGIRequest or ModPythonRequest). ## Also when request data has already been read by request.POST or ## request.raw_post_data, self._stream points to a StringIO instance ## containing that data. def read(self, *args, **kwargs): self._read_started = True return self._stream.read(*args, **kwargs) def readline(self, *args, **kwargs): self._read_started = True return self._stream.readline(*args, **kwargs) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) class QueryDict(MultiValueDict): """ A specialized MultiValueDict that takes a query string when initialized. This is immutable unless you create a copy of it. Values retrieved from this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string, mutable=False, encoding=None): MultiValueDict.__init__(self) if not encoding: # *Important*: do not import settings any earlier because of note # in core.handlers.modpython. from django.conf import settings encoding = settings.DEFAULT_CHARSET self.encoding = encoding for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True self.appendlist(force_unicode(key, encoding, errors='replace'), force_unicode(value, encoding, errors='replace')) self._mutable = mutable def _get_encoding(self): if self._encoding is None: # *Important*: do not import settings at the module level because # of the note in core.handlers.modpython. 
from django.conf import settings self._encoding = settings.DEFAULT_CHARSET return self._encoding def _set_encoding(self, value): self._encoding = value encoding = property(_get_encoding, _set_encoding) def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) MultiValueDict.__setitem__(self, key, value) def __delitem__(self, key): self._assert_mutable() super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in dict.items(self): dict.__setitem__(result, key, value) return result def __deepcopy__(self, memo): import django.utils.copycompat as copy result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = str_to_unicode(key, self.encoding) list_ = [str_to_unicode(elt, self.encoding) for elt in list_] MultiValueDict.setlist(self, key, list_) def setlistdefault(self, key, default_list=()): self._assert_mutable() if key not in self: self.setlist(key, default_list) return MultiValueDict.getlist(self, key) def appendlist(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) MultiValueDict.appendlist(self, key, value) def update(self, other_dict): self._assert_mutable() f = lambda s: str_to_unicode(s, self.encoding) if hasattr(other_dict, 'lists'): for key, valuelist in other_dict.lists(): for value in valuelist: MultiValueDict.update(self, {f(key): f(value)}) else: d = dict([(f(k), f(v)) for k, v in other_dict.items()]) MultiValueDict.update(self, d) def pop(self, key, *args): self._assert_mutable() return 
MultiValueDict.pop(self, key, *args) def popitem(self): self._assert_mutable() return MultiValueDict.popitem(self) def clear(self): self._assert_mutable() MultiValueDict.clear(self) def setdefault(self, key, default=None): self._assert_mutable() key = str_to_unicode(key, self.encoding) default = str_to_unicode(default, self.encoding) return MultiValueDict.setdefault(self, key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. :arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = smart_str(k, self.encoding) output.extend([encode(k, smart_str(v, self.encoding)) for v in list_]) return '&'.join(output) def parse_cookie(cookie): if cookie == '': return {} if not isinstance(cookie, Cookie.BaseCookie): try: c = SimpleCookie() c.load(cookie, ignore_parse_errors=True) except Cookie.CookieError: # Invalid cookie return {} else: c = cookie cookiedict = {} for key in c.keys(): cookiedict[key] = c.get(key).value return cookiedict class BadHeaderError(ValueError): pass class HttpResponse(object): """A basic HTTP response, with content and dictionary-accessed headers.""" status_code = 200 def __init__(self, content='', mimetype=None, status=None, content_type=None): # _headers is a mapping of the lower-case name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. 
self._headers = {} self._charset = settings.DEFAULT_CHARSET if mimetype: content_type = mimetype # For backwards compatibility if not content_type: content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE, self._charset) if not isinstance(content, basestring) and hasattr(content, '__iter__'): self._container = content self._is_string = False else: self._container = [content] self._is_string = True self.cookies = SimpleCookie() if status: self.status_code = status self['Content-Type'] = content_type def __str__(self): """Full HTTP message, including headers.""" return '\n'.join(['%s: %s' % (key, value) for key, value in self._headers.values()]) \ + '\n\n' + self.content def _convert_to_ascii(self, *values): """Converts all values to ascii strings.""" for value in values: if isinstance(value, unicode): try: value = value.encode('us-ascii') except UnicodeError, e: e.reason += ', HTTP response headers must be in US-ASCII format' raise else: value = str(value) if '\n' in value or '\r' in value: raise BadHeaderError("Header values can't contain newlines (got %r)" % (value)) yield value def __setitem__(self, header, value): header, value = self._convert_to_ascii(header, value) self._headers[header.lower()] = (header, value) def __delitem__(self, header): try: del self._headers[header.lower()] except KeyError: pass def __getitem__(self, header): return self._headers[header.lower()][1] def has_header(self, header): """Case-insensitive check for a header.""" return self._headers.has_key(header.lower()) __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ Sets a cookie. ``expires`` can be a string in the correct format or a ``datetime.datetime`` object in UTC. If ``expires`` is a datetime object then ``max_age`` will be calculated. 
""" self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = cookie_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True def delete_cookie(self, key, path='/', domain=None): self.set_cookie(key, max_age=0, path=path, domain=domain, expires='Thu, 01-Jan-1970 00:00:00 GMT') def _get_content(self): if self.has_header('Content-Encoding'): return ''.join(self._container) return smart_str(''.join(self._container), self._charset) def _set_content(self, value): self._container = [value] self._is_string = True content = property(_get_content, _set_content) def __iter__(self): self._iterator = iter(self._container) return self def next(self): chunk = self._iterator.next() if isinstance(chunk, unicode): chunk = chunk.encode(self._charset) return str(chunk) def close(self): if hasattr(self._container, 'close'): self._container.close() # The remaining methods partially implement the file-like object interface. 
# See http://docs.python.org/lib/bltin-file-objects.html def write(self, content): if not self._is_string: raise Exception("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if not self._is_string: raise Exception("This %s instance cannot tell its position" % self.__class__) return sum([len(chunk) for chunk in self._container]) class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to): super(HttpResponseRedirectBase, self).__init__() parsed = urlparse(redirect_to) if parsed[0] and parsed[0] not in self.allowed_schemes: raise SuspiciousOperation("Unsafe redirect to URL with scheme '%s'" % parsed[0]) self['Location'] = iri_to_uri(redirect_to) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods): super(HttpResponseNotAllowed, self).__init__() self['Allow'] = ', '.join(permitted_methods) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 # A backwards compatible alias for HttpRequest.get_host. def get_host(request): return request.get_host() # It's neither necessary nor appropriate to use # django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus, # this slightly more restricted function. def str_to_unicode(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). 
Returns any non-basestring objects without change. """ if isinstance(s, str): return unicode(s, encoding, 'replace') else: return s
./CrossVul/dataset_final_sorted/CWE-20/py/good_3768_3
crossvul-python_data_good_3766_3
from __future__ import absolute_import import datetime import os import re import sys import time import warnings from pprint import pformat from urllib import urlencode, quote from urlparse import urljoin, urlparse try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: # The mod_python version is more efficient, so try importing it first. from mod_python.util import parse_qsl except ImportError: try: # Python 2.6 and greater from urlparse import parse_qsl except ImportError: # Python 2.5. Works on Python 2.6 but raises PendingDeprecationWarning from cgi import parse_qsl import Cookie # httponly support exists in Python 2.6's Cookie library, # but not in Python 2.5. _morsel_supports_httponly = 'httponly' in Cookie.Morsel._reserved # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"') # See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256 _tc = Cookie.SimpleCookie() try: _tc.load('foo:bar=1') _cookie_allows_colon_in_names = True except Cookie.CookieError: _cookie_allows_colon_in_names = False if _morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names: SimpleCookie = Cookie.SimpleCookie else: if not _morsel_supports_httponly: class Morsel(Cookie.Morsel): def __setitem__(self, K, V): K = K.lower() if K == "httponly": if V: # The superclass rejects httponly as a key, # so we jump to the grandparent. 
super(Cookie.Morsel, self).__setitem__(K, V) else: super(Morsel, self).__setitem__(K, V) def OutputString(self, attrs=None): output = super(Morsel, self).OutputString(attrs) if "httponly" in self: output += "; httponly" return output else: Morsel = Cookie.Morsel class SimpleCookie(Cookie.SimpleCookie): if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",","\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. 
if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded if not _cookie_allows_colon_in_names or not _morsel_supports_httponly: def load(self, rawdata): self.bad_cookies = set() super(SimpleCookie, self).load(rawdata) for key in self.bad_cookies: del self[key] # override private __set() method: # (needed for using our Morsel, and for laxness with CookieError def _BaseCookie__set(self, key, real_value, coded_value): try: M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) except Cookie.CookieError: self.bad_cookies.add(key) dict.__setitem__(self, key, Cookie.Morsel()) class CompatCookie(SimpleCookie): def __init__(self, *args, **kwargs): super(CompatCookie, self).__init__(*args, **kwargs) warnings.warn("CompatCookie is deprecated. Use django.http.SimpleCookie instead.", DeprecationWarning) from django.conf import settings from django.core import signing from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser from django.http.utils import * from django.utils.datastructures import MultiValueDict, ImmutableList from django.utils.encoding import smart_str, iri_to_uri, force_unicode from django.utils.http import cookie_date from django.utils import timezone RESERVED_CHARS="!*'();:@&=+$,/?%#[]" absolute_http_url_re = re.compile(r"^https?://", re.I) class Http404(Exception): pass RAISE_ERROR = object() def build_request_repr(request, path_override=None, GET_override=None, POST_override=None, COOKIES_override=None, META_override=None): """ Builds and returns the request's representation string. The request's attributes may be overridden by pre-processed values. """ # Since this is called as part of error handling, we need to be very # robust against potentially malformed input. 
try: get = (pformat(GET_override) if GET_override is not None else pformat(request.GET)) except: get = '<could not parse>' if request._post_parse_error: post = '<could not parse>' else: try: post = (pformat(POST_override) if POST_override is not None else pformat(request.POST)) except: post = '<could not parse>' try: cookies = (pformat(COOKIES_override) if COOKIES_override is not None else pformat(request.COOKIES)) except: cookies = '<could not parse>' try: meta = (pformat(META_override) if META_override is not None else pformat(request.META)) except: meta = '<could not parse>' path = path_override if path_override is not None else request.path return smart_str(u'<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % (request.__class__.__name__, path, unicode(get), unicode(post), unicode(cookies), unicode(meta))) class UnreadablePostError(IOError): pass class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {} self.path = '' self.path_info = '' self.method = None self._post_parse_error = False def __repr__(self): return build_request_repr(self) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != (self.is_secure() and '443' or '80'): host = '%s:%s' % (host, server_port) # Disallow potentially poisoned hostnames. 
if set(';/?@&=+$,').intersection(host): raise SuspiciousOperation('Invalid HTTP_HOST header: %s' % host) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '') def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempts to return a signed cookie. If the signature fails or the cookie has expired, raises an exception... unless you provide the default argument in which case that value will be returned instead. """ try: cookie_value = self.COOKIES[key].encode('utf-8') except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. """ if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def _is_secure(self): return os.environ.get("HTTPS") == "on" def is_secure(self): # First, check the SECURE_PROXY_SSL_HEADER setting. if settings.SECURE_PROXY_SSL_HEADER: try: header, value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.') if self.META.get(header, None) == value: return True # Failing that, fall back to _is_secure(), which is a hook for # subclasses to implement. 
return self._is_secure() def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' def _set_encoding(self, val): """ Sets the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _get_encoding(self): return self._encoding encoding = property(_get_encoding, _set_encoding) def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] def _set_upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def _get_upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers upload_handlers = property(_get_upload_handlers, _set_upload_handlers) def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning = "You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise Exception("You cannot access body after reading from request's data stream") try: self._body = self.read() except IOError, e: raise UnreadablePostError, e, sys.exc_traceback self._stream = StringIO(self._body) return self._body @property def raw_post_data(self): warnings.warn('HttpRequest.raw_post_data has been deprecated. 
Use HttpRequest.body instead.', PendingDeprecationWarning) return self.body def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): # Populates self._post and self._files if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart'): if hasattr(self, '_body'): # Use already read data data = StringIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except: # An error occured while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occured. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise else: self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() ## File-like and iterator interface. ## ## Expects self._stream to be set to an appropriate source of bytes by ## a corresponding request subclass (WSGIRequest or ModPythonRequest). ## Also when request data has already been read by request.POST or ## request.body, self._stream points to a StringIO instance ## containing that data. def read(self, *args, **kwargs): self._read_started = True return self._stream.read(*args, **kwargs) def readline(self, *args, **kwargs): self._read_started = True return self._stream.readline(*args, **kwargs) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) class QueryDict(MultiValueDict): """ A specialized MultiValueDict that takes a query string when initialized. 
This is immutable unless you create a copy of it. Values retrieved from this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string, mutable=False, encoding=None): MultiValueDict.__init__(self) if not encoding: encoding = settings.DEFAULT_CHARSET self.encoding = encoding for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True self.appendlist(force_unicode(key, encoding, errors='replace'), force_unicode(value, encoding, errors='replace')) self._mutable = mutable def _get_encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding def _set_encoding(self, value): self._encoding = value encoding = property(_get_encoding, _set_encoding) def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) MultiValueDict.__setitem__(self, key, value) def __delitem__(self, key): self._assert_mutable() super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in dict.items(self): dict.__setitem__(result, key, value) return result def __deepcopy__(self, memo): import copy result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = str_to_unicode(key, self.encoding) list_ = [str_to_unicode(elt, self.encoding) for elt in list_] MultiValueDict.setlist(self, key, list_) def setlistdefault(self, key, 
default_list=()): self._assert_mutable() if key not in self: self.setlist(key, default_list) return MultiValueDict.getlist(self, key) def appendlist(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) MultiValueDict.appendlist(self, key, value) def update(self, other_dict): self._assert_mutable() f = lambda s: str_to_unicode(s, self.encoding) if hasattr(other_dict, 'lists'): for key, valuelist in other_dict.lists(): for value in valuelist: MultiValueDict.update(self, {f(key): f(value)}) else: d = dict([(f(k), f(v)) for k, v in other_dict.items()]) MultiValueDict.update(self, d) def pop(self, key, *args): self._assert_mutable() return MultiValueDict.pop(self, key, *args) def popitem(self): self._assert_mutable() return MultiValueDict.popitem(self) def clear(self): self._assert_mutable() MultiValueDict.clear(self) def setdefault(self, key, default=None): self._assert_mutable() key = str_to_unicode(key, self.encoding) default = str_to_unicode(default, self.encoding) return MultiValueDict.setdefault(self, key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. 
:arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = smart_str(k, self.encoding) output.extend([encode(k, smart_str(v, self.encoding)) for v in list_]) return '&'.join(output) def parse_cookie(cookie): if cookie == '': return {} if not isinstance(cookie, Cookie.BaseCookie): try: c = SimpleCookie() c.load(cookie) except Cookie.CookieError: # Invalid cookie return {} else: c = cookie cookiedict = {} for key in c.keys(): cookiedict[key] = c.get(key).value return cookiedict class BadHeaderError(ValueError): pass class HttpResponse(object): """A basic HTTP response, with content and dictionary-accessed headers.""" status_code = 200 def __init__(self, content='', mimetype=None, status=None, content_type=None): # _headers is a mapping of the lower-case name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._charset = settings.DEFAULT_CHARSET if mimetype: # For backwards compatibility. 
content_type = mimetype if not content_type: content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE, self._charset) self.content = content self.cookies = SimpleCookie() if status: self.status_code = status self['Content-Type'] = content_type def __str__(self): """Full HTTP message, including headers.""" return '\n'.join(['%s: %s' % (key, value) for key, value in self._headers.values()]) \ + '\n\n' + self.content def _convert_to_ascii(self, *values): """Converts all values to ascii strings.""" for value in values: if isinstance(value, unicode): try: value = value.encode('us-ascii') except UnicodeError, e: e.reason += ', HTTP response headers must be in US-ASCII format' raise else: value = str(value) if '\n' in value or '\r' in value: raise BadHeaderError("Header values can't contain newlines (got %r)" % (value)) yield value def __setitem__(self, header, value): header, value = self._convert_to_ascii(header, value) self._headers[header.lower()] = (header, value) def __delitem__(self, header): try: del self._headers[header.lower()] except KeyError: pass def __getitem__(self, header): return self._headers[header.lower()][1] def __getstate__(self): # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we # serialise to a string instead state = self.__dict__.copy() state['cookies'] = str(state['cookies']) return state def __setstate__(self, state): self.__dict__.update(state) self.cookies = SimpleCookie(self.cookies) def has_header(self, header): """Case-insensitive check for a header.""" return header.lower() in self._headers __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate=None): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ Sets a cookie. 
``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then ``max_age`` will be calculated. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_aware(expires): expires = timezone.make_naive(expires, timezone.utc) delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = cookie_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None): self.set_cookie(key, max_age=0, path=path, domain=domain, expires='Thu, 01-Jan-1970 00:00:00 GMT') def _get_content(self): if self.has_header('Content-Encoding'): return ''.join([str(e) for e in self._container]) return ''.join([smart_str(e, self._charset) for e in self._container]) def _set_content(self, value): if hasattr(value, '__iter__'): self._container = value self._base_content_is_iter = True else: self._container = [value] self._base_content_is_iter = False content = property(_get_content, _set_content) def __iter__(self): self._iterator = 
iter(self._container) return self def next(self): chunk = self._iterator.next() if isinstance(chunk, unicode): chunk = chunk.encode(self._charset) return str(chunk) def close(self): if hasattr(self._container, 'close'): self._container.close() # The remaining methods partially implement the file-like object interface. # See http://docs.python.org/lib/bltin-file-objects.html def write(self, content): if self._base_content_is_iter: raise Exception("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if self._base_content_is_iter: raise Exception("This %s instance cannot tell its position" % self.__class__) return sum([len(str(chunk)) for chunk in self._container]) class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to): super(HttpResponseRedirectBase, self).__init__() parsed = urlparse(redirect_to) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise SuspiciousOperation("Unsafe redirect to URL with scheme '%s'" % parsed.scheme) self['Location'] = iri_to_uri(redirect_to) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods): super(HttpResponseNotAllowed, self).__init__() self['Allow'] = ', '.join(permitted_methods) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 # A backwards compatible alias for HttpRequest.get_host. 
def get_host(request): return request.get_host() # It's neither necessary nor appropriate to use # django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus, # this slightly more restricted function. def str_to_unicode(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Returns any non-basestring objects without change. """ if isinstance(s, str): return unicode(s, encoding, 'replace') else: return s
./CrossVul/dataset_final_sorted/CWE-20/py/good_3766_3
crossvul-python_data_bad_2141_4
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import os import re import codecs import jinja2 from jinja2.runtime import StrictUndefined from jinja2.exceptions import TemplateSyntaxError import yaml import json from ansible import errors import ansible.constants as C import time import subprocess import datetime import pwd import ast import traceback from ansible.utils.string_functions import count_newlines_from_end class Globals(object): FILTERS = None def __init__(self): pass def _get_filters(): ''' return filter plugin instances ''' if Globals.FILTERS is not None: return Globals.FILTERS from ansible import utils plugins = [ x for x in utils.plugins.filter_loader.all()] filters = {} for fp in plugins: filters.update(fp.filters()) Globals.FILTERS = filters return Globals.FILTERS def _get_extensions(): ''' return jinja2 extensions to load ''' ''' if some extensions are set via jinja_extensions in ansible.cfg, we try to load them with the jinja environment ''' jinja_exts = [] if C.DEFAULT_JINJA2_EXTENSIONS: ''' Let's make sure the configuration directive doesn't contain spaces and split extensions in an array ''' jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',') return jinja_exts class Flags: LEGACY_TEMPLATE_WARNING = False # TODO: refactor this file FILTER_PLUGINS = None _LISTRE = 
re.compile(r"(\w+)\[(\d+)\]") JINJA2_OVERRIDE='#jinja2:' def lookup(name, *args, **kwargs): from ansible import utils instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get('basedir',None)) vars = kwargs.get('vars', None) if instance is not None: # safely catch run failures per #5059 try: ran = instance.run(*args, inject=vars, **kwargs) except errors.AnsibleError: # Plugin raised this on purpose raise except Exception, e: ran = None if ran: ran = ",".join(ran) return ran else: raise errors.AnsibleError("lookup plugin (%s) not found" % name) def template(basedir, varname, vars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True): ''' templates a data structure by traversing it and substituting for other data structures ''' from ansible import utils try: if convert_bare and isinstance(varname, basestring): first_part = varname.split(".")[0].split("[")[0] if first_part in vars and '{{' not in varname and '$' not in varname: varname = "{{%s}}" % varname if isinstance(varname, basestring): if '{{' in varname or '{%' in varname: varname = template_from_string(basedir, varname, vars, fail_on_undefined) if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["): eval_results = utils.safe_eval(varname, locals=vars, include_exceptions=True) if eval_results[1] is None: varname = eval_results[0] return varname elif isinstance(varname, (list, tuple)): return [template(basedir, v, vars, lookup_fatal, depth, expand_lists, fail_on_undefined=fail_on_undefined) for v in varname] elif isinstance(varname, dict): d = {} for (k, v) in varname.iteritems(): d[k] = template(basedir, v, vars, lookup_fatal, depth, expand_lists, fail_on_undefined=fail_on_undefined) return d else: return varname except errors.AnsibleFilterError: if filter_fatal: raise else: return varname class _jinja2_vars(object): ''' Helper class to template all variable content before jinja2 sees it. 
This is done by hijacking the variable storage that jinja2 uses, and overriding __contains__ and __getitem__ to look like a dict. Added bonus is avoiding duplicating the large hashes that inject tends to be. To facilitate using builtin jinja2 things like range, globals are handled here. extras is a list of locals to also search for variables. ''' def __init__(self, basedir, vars, globals, fail_on_undefined, *extras): self.basedir = basedir self.vars = vars self.globals = globals self.fail_on_undefined = fail_on_undefined self.extras = extras def __contains__(self, k): if k in self.vars: return True for i in self.extras: if k in i: return True if k in self.globals: return True return False def __getitem__(self, varname): if varname not in self.vars: for i in self.extras: if varname in i: return i[varname] if varname in self.globals: return self.globals[varname] else: raise KeyError("undefined variable: %s" % varname) var = self.vars[varname] # HostVars is special, return it as-is if isinstance(var, dict) and type(var) != dict: return var else: return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined) def add_locals(self, locals): ''' If locals are provided, create a copy of self containing those locals in addition to what is already in this variable proxy. ''' if locals is None: return self return _jinja2_vars(self.basedir, self.vars, self.globals, self.fail_on_undefined, locals, *self.extras) class J2Template(jinja2.environment.Template): ''' This class prevents Jinja2 from running _jinja2_vars through dict() Without this, {% include %} and similar will create new contexts unlike the special one created in template_from_file. This ensures they are all alike, with the exception of potential locals. 
''' def new_context(self, vars=None, shared=False, locals=None): return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks) def template_from_file(basedir, path, vars, vault_password=None): ''' run a file through the templating engine ''' fail_on_undefined = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR from ansible import utils realpath = utils.path_dwim(basedir, path) loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)]) def my_lookup(*args, **kwargs): kwargs['vars'] = vars return lookup(*args, basedir=basedir, **kwargs) def my_finalize(thing): return thing if thing is not None else '' environment = jinja2.Environment(loader=loader, trim_blocks=True, extensions=_get_extensions()) environment.filters.update(_get_filters()) environment.globals['lookup'] = my_lookup environment.globals['finalize'] = my_finalize if fail_on_undefined: environment.undefined = StrictUndefined try: data = codecs.open(realpath, encoding="utf8").read() except UnicodeDecodeError: raise errors.AnsibleError("unable to process as utf-8: %s" % realpath) except: raise errors.AnsibleError("unable to read %s" % realpath) # Get jinja env overrides from template if data.startswith(JINJA2_OVERRIDE): eol = data.find('\n') line = data[len(JINJA2_OVERRIDE):eol] data = data[eol+1:] for pair in line.split(','): (key,val) = pair.split(':') setattr(environment,key.strip(),ast.literal_eval(val.strip())) environment.template_class = J2Template try: t = environment.from_string(data) except TemplateSyntaxError, e: # Throw an exception which includes a more user friendly error message values = {'name': realpath, 'lineno': e.lineno, 'error': str(e)} msg = 'file: %(name)s, line number: %(lineno)s, error: %(error)s' % \ values error = errors.AnsibleError(msg) raise error vars = vars.copy() try: template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name except: template_uid = os.stat(realpath).st_uid vars['template_host'] = os.uname()[1] vars['template_path'] = realpath 
vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath)) vars['template_uid'] = template_uid vars['template_fullpath'] = os.path.abspath(realpath) vars['template_run_date'] = datetime.datetime.now() managed_default = C.DEFAULT_MANAGED_STR managed_str = managed_default.format( host = vars['template_host'], uid = vars['template_uid'], file = vars['template_path'] ) vars['ansible_managed'] = time.strftime( managed_str, time.localtime(os.path.getmtime(realpath)) ) # This line performs deep Jinja2 magic that uses the _jinja2_vars object for vars # Ideally, this could use some API where setting shared=True and the object won't get # passed through dict(o), but I have not found that yet. try: res = jinja2.utils.concat(t.root_render_func(t.new_context(_jinja2_vars(basedir, vars, t.globals, fail_on_undefined), shared=True))) except jinja2.exceptions.UndefinedError, e: raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e)) except jinja2.exceptions.TemplateNotFound, e: # Throw an exception which includes a more user friendly error message # This likely will happen for included sub-template. Not that besides # pure "file not found" it may happen due to Jinja2's "security" # checks on path. 
values = {'name': realpath, 'subname': str(e)} msg = 'file: %(name)s, error: Cannot find/not allowed to load (include) template %(subname)s' % \ values error = errors.AnsibleError(msg) raise error # The low level calls above do not preserve the newline # characters at the end of the input data, so we use the # calculate the difference in newlines and append them # to the resulting output for parity res_newlines = count_newlines_from_end(res) data_newlines = count_newlines_from_end(data) if data_newlines > res_newlines: res += '\n' * (data_newlines - res_newlines) if isinstance(res, unicode): # do not try to re-template a unicode string result = res else: result = template(basedir, res, vars) return result def template_from_string(basedir, data, vars, fail_on_undefined=False): ''' run a string through the (Jinja2) templating engine ''' try: if type(data) == str: data = unicode(data, 'utf-8') def my_finalize(thing): return thing if thing is not None else '' environment = jinja2.Environment(trim_blocks=True, undefined=StrictUndefined, extensions=_get_extensions(), finalize=my_finalize) environment.filters.update(_get_filters()) environment.template_class = J2Template if '_original_file' in vars: basedir = os.path.dirname(vars['_original_file']) filesdir = os.path.abspath(os.path.join(basedir, '..', 'files')) if os.path.exists(filesdir): basedir = filesdir # 6227 if isinstance(data, unicode): try: data = data.decode('utf-8') except UnicodeEncodeError, e: pass try: t = environment.from_string(data) except Exception, e: if 'recursion' in str(e): raise errors.AnsibleError("recursive loop detected in template string: %s" % data) else: return data def my_lookup(*args, **kwargs): kwargs['vars'] = vars return lookup(*args, basedir=basedir, **kwargs) t.globals['lookup'] = my_lookup t.globals['finalize'] = my_finalize jvars =_jinja2_vars(basedir, vars, t.globals, fail_on_undefined) new_context = t.new_context(jvars, shared=True) rf = t.root_render_func(new_context) try: res = 
jinja2.utils.concat(rf) except TypeError, te: if 'StrictUndefined' in str(te): raise errors.AnsibleUndefinedVariable( "Unable to look up a name or access an attribute in template string. " + \ "Make sure your variable name does not contain invalid characters like '-'." ) else: raise errors.AnsibleError("an unexpected type error occured. Error was %s" % te) return res except (jinja2.exceptions.UndefinedError, errors.AnsibleUndefinedVariable): if fail_on_undefined: raise else: return data
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2141_4
crossvul-python_data_bad_2156_0
# # The Python Imaging Library. # $Id$ # # Mac OS X icns file decoder, based on icns.py by Bob Ippolito. # # history: # 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies. # # Copyright (c) 2004 by Bob Ippolito. # Copyright (c) 2004 by Secret Labs. # Copyright (c) 2004 by Fredrik Lundh. # Copyright (c) 2014 by Alastair Houghton. # # See the README file for information on usage and redistribution. # from PIL import Image, ImageFile, PngImagePlugin, _binary import struct, io enable_jpeg2k = hasattr(Image.core, 'jp2klib_version') if enable_jpeg2k: from PIL import Jpeg2KImagePlugin i8 = _binary.i8 HEADERSIZE = 8 def nextheader(fobj): return struct.unpack('>4sI', fobj.read(HEADERSIZE)) def read_32t(fobj, start_length, size): # The 128x128 icon seems to have an extra header for some reason. (start, length) = start_length fobj.seek(start) sig = fobj.read(4) if sig != b'\x00\x00\x00\x00': raise SyntaxError('Unknown signature, expecting 0x00000000') return read_32(fobj, (start + 4, length - 4), size) def read_32(fobj, start_length, size): """ Read a 32bit RGB icon resource. Seems to be either uncompressed or an RLE packbits-like scheme. 
""" (start, length) = start_length fobj.seek(start) pixel_size = (size[0] * size[2], size[1] * size[2]) sizesq = pixel_size[0] * pixel_size[1] if length == sizesq * 3: # uncompressed ("RGBRGBGB") indata = fobj.read(length) im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1) else: # decode image im = Image.new("RGB", pixel_size, None) for band_ix in range(3): data = [] bytesleft = sizesq while bytesleft > 0: byte = fobj.read(1) if not byte: break byte = i8(byte) if byte & 0x80: blocksize = byte - 125 byte = fobj.read(1) for i in range(blocksize): data.append(byte) else: blocksize = byte + 1 data.append(fobj.read(blocksize)) bytesleft -= blocksize if bytesleft <= 0: break if bytesleft != 0: raise SyntaxError( "Error reading channel [%r left]" % bytesleft ) band = Image.frombuffer( "L", pixel_size, b"".join(data), "raw", "L", 0, 1 ) im.im.putband(band.im, band_ix) return {"RGB": im} def read_mk(fobj, start_length, size): # Alpha masks seem to be uncompressed (start, length) = start_length fobj.seek(start) pixel_size = (size[0] * size[2], size[1] * size[2]) sizesq = pixel_size[0] * pixel_size[1] band = Image.frombuffer( "L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1 ) return {"A": band} def read_png_or_jpeg2000(fobj, start_length, size): (start, length) = start_length fobj.seek(start) sig = fobj.read(12) if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a': fobj.seek(start) im = PngImagePlugin.PngImageFile(fobj) return {"RGBA": im} elif sig[:4] == b'\xff\x4f\xff\x51' \ or sig[:4] == b'\x0d\x0a\x87\x0a' \ or sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a': if not enable_jpeg2k: raise ValueError('Unsupported icon subimage format (rebuild PIL with JPEG 2000 support to fix this)') # j2k, jpc or j2c fobj.seek(start) jp2kstream = fobj.read(length) f = io.BytesIO(jp2kstream) im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) if im.mode != 'RGBA': im = im.convert('RGBA') return {"RGBA": im} else: raise ValueError('Unsupported icon subimage format') class IcnsFile: SIZES = { 
(512, 512, 2): [ (b'ic10', read_png_or_jpeg2000), ], (512, 512, 1): [ (b'ic09', read_png_or_jpeg2000), ], (256, 256, 2): [ (b'ic14', read_png_or_jpeg2000), ], (256, 256, 1): [ (b'ic08', read_png_or_jpeg2000), ], (128, 128, 2): [ (b'ic13', read_png_or_jpeg2000), ], (128, 128, 1): [ (b'ic07', read_png_or_jpeg2000), (b'it32', read_32t), (b't8mk', read_mk), ], (64, 64, 1): [ (b'icp6', read_png_or_jpeg2000), ], (32, 32, 2): [ (b'ic12', read_png_or_jpeg2000), ], (48, 48, 1): [ (b'ih32', read_32), (b'h8mk', read_mk), ], (32, 32, 1): [ (b'icp5', read_png_or_jpeg2000), (b'il32', read_32), (b'l8mk', read_mk), ], (16, 16, 2): [ (b'ic11', read_png_or_jpeg2000), ], (16, 16, 1): [ (b'icp4', read_png_or_jpeg2000), (b'is32', read_32), (b's8mk', read_mk), ], } def __init__(self, fobj): """ fobj is a file-like object as an icns resource """ # signature : (start, length) self.dct = dct = {} self.fobj = fobj sig, filesize = nextheader(fobj) if sig != b'icns': raise SyntaxError('not an icns file') i = HEADERSIZE while i < filesize: sig, blocksize = nextheader(fobj) i += HEADERSIZE blocksize -= HEADERSIZE dct[sig] = (i, blocksize) fobj.seek(blocksize, 1) i += blocksize def itersizes(self): sizes = [] for size, fmts in self.SIZES.items(): for (fmt, reader) in fmts: if fmt in self.dct: sizes.append(size) break return sizes def bestsize(self): sizes = self.itersizes() if not sizes: raise SyntaxError("No 32bit icon resources found") return max(sizes) def dataforsize(self, size): """ Get an icon resource as {channel: array}. Note that the arrays are bottom-up like windows bitmaps and will likely need to be flipped or transposed in some way. 
""" dct = {} for code, reader in self.SIZES[size]: desc = self.dct.get(code) if desc is not None: dct.update(reader(self.fobj, desc, size)) return dct def getimage(self, size=None): if size is None: size = self.bestsize() if len(size) == 2: size = (size[0], size[1], 1) channels = self.dataforsize(size) im = channels.get('RGBA', None) if im: return im im = channels.get("RGB").copy() try: im.putalpha(channels["A"]) except KeyError: pass return im ## # Image plugin for Mac OS icons. class IcnsImageFile(ImageFile.ImageFile): """ PIL read-only image support for Mac OS .icns files. Chooses the best resolution, but will possibly load a different size image if you mutate the size attribute before calling 'load'. The info dictionary has a key 'sizes' that is a list of sizes that the icns file has. """ format = "ICNS" format_description = "Mac OS icns resource" def _open(self): self.icns = IcnsFile(self.fp) self.mode = 'RGBA' self.best_size = self.icns.bestsize() self.size = (self.best_size[0] * self.best_size[2], self.best_size[1] * self.best_size[2]) self.info['sizes'] = self.icns.itersizes() # Just use this to see if it's loaded or not yet. self.tile = ('',) def load(self): if len(self.size) == 3: self.best_size = self.size self.size = (self.best_size[0] * self.best_size[2], self.best_size[1] * self.best_size[2]) Image.Image.load(self) if not self.tile: return self.load_prepare() # This is likely NOT the best way to do it, but whatever. 
im = self.icns.getimage(self.best_size) # If this is a PNG or JPEG 2000, it won't be loaded yet im.load() self.im = im.im self.mode = im.mode self.size = im.size self.fp = None self.icns = None self.tile = () self.load_end() Image.register_open("ICNS", IcnsImageFile, lambda x: x[:4] == b'icns') Image.register_extension("ICNS", '.icns') if __name__ == '__main__': import os, sys imf = IcnsImageFile(open(sys.argv[1], 'rb')) for size in imf.info['sizes']: imf.size = size imf.load() im = imf.im im.save('out-%s-%s-%s.png' % size) im = Image.open(open(sys.argv[1], "rb")) im.save("out.png") if sys.platform == 'windows': os.startfile("out.png")
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2156_0
crossvul-python_data_good_5568_1
# packet.py # # Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net> # # A RADIUS packet as defined in RFC 2138 import struct import random try: import hashlib md5_constructor = hashlib.md5 except ImportError: # BBB for python 2.4 import md5 md5_constructor = md5.new import six from pyrad import tools # Packet codes AccessRequest = 1 AccessAccept = 2 AccessReject = 3 AccountingRequest = 4 AccountingResponse = 5 AccessChallenge = 11 StatusServer = 12 StatusClient = 13 DisconnectRequest = 40 DisconnectACK = 41 DisconnectNAK = 42 CoARequest = 43 CoAACK = 44 CoANAK = 45 # Use cryptographic-safe random generator as provided by the OS. random_generator = random.SystemRandom() # Current ID CurrentID = random_generator.randrange(1, 255) class PacketError(Exception): pass class Packet(dict): """Packet acts like a standard python map to provide simple access to the RADIUS attributes. Since RADIUS allows for repeated attributes the value will always be a sequence. pyrad makes sure to preserve the ordering when encoding and decoding packets. There are two ways to use the map intereface: if attribute names are used pyrad take care of en-/decoding data. If the attribute type number (or a vendor ID/attribute type tuple for vendor attributes) is used you work with the raw data. Normally you will not use this class directly, but one of the :obj:`AuthPacket` or :obj:`AcctPacket` classes. 
""" def __init__(self, code=0, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ dict.__init__(self) self.code = code if id is not None: self.id = id else: self.id = CreateID() if not isinstance(secret, six.binary_type): raise TypeError('secret must be a binary string') self.secret = secret if authenticator is not None and \ not isinstance(authenticator, six.binary_type): raise TypeError('authenticator must be a binary string') self.authenticator = authenticator if 'dict' in attributes: self.dict = attributes['dict'] if 'packet' in attributes: self.DecodePacket(attributes['packet']) for (key, value) in attributes.items(): if key in ['dict', 'fd', 'packet']: continue key = key.replace('_', '-') self.AddAttribute(key, value) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. 
""" return Packet(id=self.id, secret=self.secret, authenticator=self.authenticator, dict=self.dict, **attributes) def _DecodeValue(self, attr, value): if attr.values.HasBackward(value): return attr.values.GetBackward(value) else: return tools.DecodeAttr(attr.type, value) def _EncodeValue(self, attr, value): if attr.values.HasForward(value): return attr.values.GetForward(value) else: return tools.EncodeAttr(attr.type, value) def _EncodeKeyValues(self, key, values): if not isinstance(key, str): return (key, values) attr = self.dict.attributes[key] if attr.vendor: key = (self.dict.vendors.GetForward(attr.vendor), attr.code) else: key = attr.code return (key, [self._EncodeValue(attr, v) for v in values]) def _EncodeKey(self, key): if not isinstance(key, str): return key attr = self.dict.attributes[key] if attr.vendor: return (self.dict.vendors.GetForward(attr.vendor), attr.code) else: return attr.code def _DecodeKey(self, key): """Turn a key into a string if possible""" if self.dict.attrindex.HasBackward(key): return self.dict.attrindex.GetBackward(key) return key def AddAttribute(self, key, value): """Add an attribute to the packet. 
:param key: attribute name or identification :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute """ (key, value) = self._EncodeKeyValues(key, [value]) value = value[0] self.setdefault(key, []).append(value) def __getitem__(self, key): if not isinstance(key, six.string_types): return dict.__getitem__(self, key) values = dict.__getitem__(self, self._EncodeKey(key)) attr = self.dict.attributes[key] res = [] for v in values: res.append(self._DecodeValue(attr, v)) return res def __contains__(self, key): try: return dict.__contains__(self, self._EncodeKey(key)) except KeyError: return False has_key = __contains__ def __delitem__(self, key): dict.__delitem__(self, self._EncodeKey(key)) def __setitem__(self, key, item): if isinstance(key, six.string_types): (key, item) = self._EncodeKeyValues(key, [item]) dict.__setitem__(self, key, item) else: assert isinstance(item, list) dict.__setitem__(self, key, item) def keys(self): return [self._DecodeKey(key) for key in dict.keys(self)] @staticmethod def CreateAuthenticator(): """Create a packet autenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string """ data = [] for i in range(16): data.append(random_generator.randrange(0, 256)) if six.PY3: return bytes(data) else: return ''.join(chr(b) for b in data) def CreateID(self): """Create a packet ID. All RADIUS requests have a ID which is used to identify a request. This is used to detect retries and replay attacks. This function returns a suitable random number that can be used as ID. :return: ID number :rtype: integer """ return random_generator.randrange(0, 256) def ReplyPacket(self): """Create a ready-to-transmit authentication reply packet. 
Returns a RADIUS packet which can be directly transmitted to a RADIUS server. This differs with Packet() in how the authenticator is calculated. :return: raw packet :rtype: string """ assert(self.authenticator) assert(self.secret) attr = self._PktEncodeAttributes() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) authenticator = md5_constructor(header[0:4] + self.authenticator + attr + self.secret).digest() return header + authenticator + attr def VerifyReply(self, reply, rawreply=None): if reply.id != self.id: return False if rawreply is None: rawreply = reply.ReplyPacket() hash = md5_constructor(rawreply[0:4] + self.authenticator + rawreply[20:] + self.secret).digest() if hash != rawreply[4:20]: return False return True def _PktEncodeAttribute(self, key, value): if isinstance(key, tuple): value = struct.pack('!L', key[0]) + \ self._PktEncodeAttribute(key[1], value) key = 26 return struct.pack('!BB', key, (len(value) + 2)) + value def _PktEncodeAttributes(self): result = six.b('') for (code, datalst) in self.items(): for data in datalst: result += self._PktEncodeAttribute(code, data) return result def _PktDecodeVendorAttribute(self, data): # Check if this packet is long enough to be in the # RFC2865 recommended form if len(data) < 6: return (26, data) (vendor, type, length) = struct.unpack('!LBB', data[:6])[0:3] # Another sanity check if len(data) != length + 4: return (26, data) return ((vendor, type), data[6:]) def DecodePacket(self, packet): """Initialize the object from raw packet data. Decode a packet as received from the network and decode it. 
:param packet: raw packet :type packet: string""" try: (self.code, self.id, length, self.authenticator) = \ struct.unpack('!BBH16s', packet[0:20]) except struct.error: raise PacketError('Packet header is corrupt') if len(packet) != length: raise PacketError('Packet has invalid length') if length > 8192: raise PacketError('Packet length is too long (%d)' % length) self.clear() packet = packet[20:] while packet: try: (key, attrlen) = struct.unpack('!BB', packet[0:2]) except struct.error: raise PacketError('Attribute header is corrupt') if attrlen < 2: raise PacketError( 'Attribute length is too small (%d)' % attrlen) value = packet[2:attrlen] if key == 26: (key, value) = self._PktDecodeVendorAttribute(value) self.setdefault(key, []).append(value) packet = packet[attrlen:] class AuthPacket(Packet): def __init__(self, code=AccessRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param code: packet type code :type code: integer (8bits) :param id: packet identifaction number :type id: integer (8 bits) :param secret: secret needed to communicate with a RADIUS server :type secret: string :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AuthPacket(AccessAccept, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. 
:return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH16s', self.code, self.id, (20 + len(attr)), self.authenticator) return header + attr def PwDecrypt(self, password): """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string """ buf = password pw = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): pw += bytes((hash[i] ^ buf[i],)) else: for i in range(16): pw += chr(ord(hash[i]) ^ ord(buf[i])) (last, buf) = (buf[:16], buf[16:]) while pw.endswith(six.b('\x00')): pw = pw[:-1] return pw.decode('utf-8') def PwCrypt(self, password): """Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. 
:param password: plaintext password :type password: unicode stringn :return: obfuscated version of the password :rtype: binary string """ if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if isinstance(password, six.text_type): password = password.encode('utf-8') buf = password if len(password) % 16 != 0: buf += six.b('\x00') * (16 - (len(password) % 16)) hash = md5_constructor(self.secret + self.authenticator).digest() result = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): result += bytes((hash[i] ^ buf[i],)) else: for i in range(16): result += chr(ord(hash[i]) ^ ord(buf[i])) last = result[-16:] buf = buf[16:] return result class AcctPacket(Packet): """RADIUS accounting packets. This class is a specialization of the generic :obj:`Packet` class for accounting packets. """ def __init__(self, code=AccountingRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) if 'packet' in attributes: self.raw_packet = attributes['packet'] def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AcctPacket(AccountingResponse, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def VerifyAcctRequest(self): """Verify request authenticator. 
:return: True if verification failed else False :rtype: boolean """ assert(self.raw_packet) hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') + self.raw_packet[20:] + self.secret).digest() return hash == self.authenticator def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. :return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00') + attr + self.secret).digest() return header + self.authenticator + attr def CreateID(): """Generate a packet ID. :return: packet ID :rtype: 8 bit integer """ global CurrentID CurrentID = (CurrentID + 1) % 256 return CurrentID
./CrossVul/dataset_final_sorted/CWE-20/py/good_5568_1
crossvul-python_data_bad_3268_2
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import tempfile from string import ascii_letters, digits from ansible.errors import AnsibleOptionsError from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser from ansible.module_utils._text import to_text from ansible.parsing.quoting import unquote from ansible.utils.path import makedirs_safe BOOL_TRUE = frozenset([ "true", "t", "y", "1", "yes", "on" ]) def mk_boolean(value): ret = value if not isinstance(value, bool): if value is None: ret = False ret = (str(value).lower() in BOOL_TRUE) return ret def shell_expand(path, expand_relative_paths=False): ''' shell_expand is needed as os.path.expanduser does not work when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE ''' if path: path = os.path.expanduser(os.path.expandvars(path)) if expand_relative_paths and not path.startswith('/'): # paths are always 'relative' to the config? 
if 'CONFIG_FILE' in globals(): CFGDIR = os.path.dirname(CONFIG_FILE) path = os.path.join(CFGDIR, path) path = os.path.abspath(path) return path def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False): ''' return a configuration variable with casting :arg p: A ConfigParser object to look for the configuration in :arg section: A section of the ini config that should be examined for this section. :arg key: The config key to get this config from :arg env_var: An Environment variable to check for the config var. If this is set to None then no environment variable will be used. :arg default: A default value to assign to the config var if nothing else sets it. :kwarg value_type: The type of the value. This can be any of the following strings: :boolean: sets the value to a True or False value :integer: Sets the value to an integer or raises a ValueType error :float: Sets the value to a float or raises a ValueType error :list: Treats the value as a comma separated list. Split the value and return it as a python list. :none: Sets the value to None :path: Expands any environment variables and tilde's in the value. :tmp_path: Create a unique temporary directory inside of the directory specified by value and return its path. :pathlist: Treat the value as a typical PATH string. (On POSIX, this means colon separated strings.) Split the value and then expand each part for environment variables and tildes. :kwarg expand_relative_paths: for pathlist and path types, if this is set to True then also change any relative paths into absolute paths. The default is False. 
''' value = _get_config(p, section, key, env_var, default) if value_type == 'boolean': value = mk_boolean(value) elif value: if value_type == 'integer': value = int(value) elif value_type == 'float': value = float(value) elif value_type == 'list': if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] elif value_type == 'none': if value == "None": value = None elif value_type == 'path': value = shell_expand(value, expand_relative_paths=expand_relative_paths) elif value_type == 'tmppath': value = shell_expand(value) if not os.path.exists(value): makedirs_safe(value, 0o700) prefix = 'ansible-local-%s' % os.getpid() value = tempfile.mkdtemp(prefix=prefix, dir=value) elif value_type == 'pathlist': if isinstance(value, string_types): value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \ for x in value.split(os.pathsep)] elif isinstance(value, string_types): value = unquote(value) return to_text(value, errors='surrogate_or_strict', nonstring='passthru') def _get_config(p, section, key, env_var, default): ''' helper function for get_config ''' value = default if p is not None: try: value = p.get(section, key, raw=True) except: pass if env_var is not None: env_value = os.environ.get(env_var, None) if env_value is not None: value = env_value return to_text(value, errors='surrogate_or_strict', nonstring='passthru') def load_config_file(): ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' p = configparser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = os.path.expanduser(path0) if os.path.isdir(path0): path0 += "/ansible.cfg" try: path1 = os.getcwd() + "/ansible.cfg" except OSError: path1 = None path2 = os.path.expanduser("~/.ansible.cfg") path3 = "/etc/ansible/ansible.cfg" for path in [path0, path1, path2, path3]: if path is not None and os.path.exists(path): try: p.read(path) except configparser.Error as e: raise AnsibleOptionsError("Error reading config file: 
\n{0}".format(e)) return p, path return None, '' p, CONFIG_FILE = load_config_file() # check all of these extensions when looking for yaml files for things like # group variables -- really anything we can load YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] # the default whitelist for cow stencils DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www',] # sections in config file DEFAULTS='defaults' # FIXME: add deprecation warning when these get set #### DEPRECATED VARS #### # #### If --tags or --skip-tags is given multiple times on the CLI and this is # True, merge the lists of tags together. If False, let the last argument # overwrite any previous ones. Behaviour is overwrite through 2.2. 2.3 # overwrites but prints deprecation. 2.4 the default is to merge. 
MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'ANSIBLE_MERGE_MULTIPLE_CLI_TAGS', True, value_type='boolean')

#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
DEFAULT_HOST_LIST = get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts', value_type='path')
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH',
                                '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles',
                                value_type='pathlist', expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FACT_PATH = get_config(p, DEFAULTS, 'fact_path', 'ANSIBLE_FACT_PATH', None, value_type='path')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale', 'ANSIBLE_MODULE_SET_LOCALE', False, value_type='boolean')
DEFAULT_MODULE_COMPRESSION = get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, value_type='integer')
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, value_type='integer')
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, value_type='boolean')
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, value_type='path')
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, value_type='integer')
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, value_type='boolean')
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, value_type='path')
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, value_type='boolean')
DEFAULT_SSH_TRANSFER_METHOD = get_config(p, 'ssh_connection', 'transfer_method', 'ANSIBLE_SSH_TRANSFER_METHOD', None)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, value_type='boolean')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, value_type='boolean')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
# gathering settings are normalized to lowercase for later comparisons
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE',
                                      ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
SHOW_CUSTOM_STATS = get_config(p, DEFAULTS, 'show_custom_stats', 'ANSIBLE_SHOW_CUSTOM_STATS', False, value_type='boolean')
NAMESPACE_FACTS = get_config(p, DEFAULTS, 'restrict_facts_namespace', 'ANSIBLE_RESTRICT_FACTS', False, value_type='boolean')

# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, value_type='boolean')
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, value_type='boolean')

# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, value_type='boolean')
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, value_type='boolean')
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, value_type='boolean')

# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs, 9p', value_type='list')
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, value_type='boolean')

### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, value_type='boolean')
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, value_type='boolean')
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, value_type='boolean')
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')

# Become
# Error/prompt strings emitted by each become method on a bad password and
# on a missing password, used to classify failures.
BECOME_ERROR_STRINGS = {
    'sudo': 'Sorry, try again.',
    'su': 'Authentication failure',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Permission denied',
    'dzdo': '',
    'ksu': 'Password incorrect'
}  # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
    'sudo': 'sorry, a password is required to run sudo',
    'su': '',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Authorization required',
    'dzdo': '',
    'ksu': 'No password given'
}  # FIXME: deal with i18n
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
# default become method honours the deprecated sudo/su toggles above
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD',
                                   'sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo').lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME', False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, value_type='boolean')

# PLUGINS

# Modules that can optimize with_items loops into a single call.  Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list.  If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS',
                                    "apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')

# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS',
                                        '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS',
                                       '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS',
                                          '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS',
                                            '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS',
                                        '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY',
                                 '~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules', value_type='pathlist')
DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS',
                                       '~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils', value_type='pathlist')
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS',
                                           '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS',
                                      '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS',
                                        '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS',
                                      '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS',
                                          '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')

NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules', 'NETWORK_GROUP_MODULES',
                                   ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'vyos', 'sros', 'dellos9', 'dellos10', 'dellos6'], value_type='list')

DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')

# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, value_type='integer')

# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, value_type='boolean')
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, value_type='boolean')
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, value_type='boolean')
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, value_type='list')
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, value_type='boolean')
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, value_type='boolean')
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, value_type='boolean')
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, value_type='boolean')
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, value_type='boolean')
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], value_type='list')
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, value_type='boolean')
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, value_type='boolean')
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], value_type='list')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, value_type='boolean')
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, value_type='path')
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, value_type='none')
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, value_type='boolean')
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, value_type='integer')

# CONNECTION RELATED
USE_PERSISTENT_CONNECTIONS = get_config(p, DEFAULTS, 'use_persistent_connections', 'ANSIBLE_USE_PERSISTENT_CONNECTIONS', False, value_type='boolean')
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
### WARNING: Someone might be tempted to switch this from percent-formatting
# to .format() in the future.  be sure to read this:
# http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/ and understand
# that it may be a security risk to do so.
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', None)
ANSIBLE_SSH_CONTROL_PATH_DIR = get_config(p, 'ssh_connection', 'control_path_dir', 'ANSIBLE_SSH_CONTROL_PATH_DIR', u'~/.ansible/cp')
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, value_type='boolean')
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, value_type='integer')
ANSIBLE_SSH_EXECUTABLE = get_config(p, 'ssh_connection', 'ssh_executable', 'ANSIBLE_SSH_EXECUTABLE', 'ssh')
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, value_type='boolean')
PARAMIKO_HOST_KEY_AUTO_ADD = get_config(p, 'paramiko_connection', 'host_key_auto_add', 'ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD', False, value_type='boolean')
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
PARAMIKO_LOOK_FOR_KEYS = get_config(p, 'paramiko_connection', 'look_for_keys', 'ANSIBLE_PARAMIKO_LOOK_FOR_KEYS', True, value_type='boolean')
PERSISTENT_CONNECT_TIMEOUT = get_config(p, 'persistent_connection', 'connect_timeout', 'ANSIBLE_PERSISTENT_CONNECT_TIMEOUT', 30, value_type='integer')
PERSISTENT_CONNECT_RETRIES = get_config(p, 'persistent_connection', 'connect_retries', 'ANSIBLE_PERSISTENT_CONNECT_RETRIES', 30, value_type='integer')
PERSISTENT_CONNECT_INTERVAL = get_config(p, 'persistent_connection', 'connect_interval', 'ANSIBLE_PERSISTENT_CONNECT_INTERVAL', 1, value_type='integer')

# obsolete -- will be formally removed
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, value_type='integer')
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, value_type='integer')
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, value_type='float')
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, value_type='integer')
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, value_type='boolean')
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, value_type='boolean')

# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, value_type='boolean')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'], value_type='list')

STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS',
                                 ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list')

# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')

# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
# BUGFIX: was value_type='bool', which get_config() does not recognize
# (only 'boolean' is handled), so a config-file value such as "False"
# came back as a truthy string instead of a real boolean.
DIFF_ALWAYS = get_config(p, 'diff', 'always', 'ANSIBLE_DIFF_ALWAYS', False, value_type='boolean')

# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict')  # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])

# module search
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]
INTERNAL_RESULT_KEYS = ['add_host', 'add_group']
RESTRICTED_RESULT_KEYS = ['ansible_rsync_path', 'ansible_playbook_python']
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3268_2
crossvul-python_data_bad_100_0
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains constants from the specification."""


class Membership(object):
    """Represents the membership states of a user in a room."""
    INVITE = u"invite"
    JOIN = u"join"
    KNOCK = u"knock"
    LEAVE = u"leave"
    BAN = u"ban"
    # Tuple of every membership state, for iteration/validation.
    LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)


class PresenceState(object):
    """Represents the presence state of a user."""
    OFFLINE = u"offline"
    UNAVAILABLE = u"unavailable"
    ONLINE = u"online"


class JoinRules(object):
    """Values of the ``m.room.join_rules`` state event's join rule."""
    PUBLIC = u"public"
    KNOCK = u"knock"
    INVITE = u"invite"
    PRIVATE = u"private"


class LoginType(object):
    """Authentication (login/UI-auth) type identifiers."""
    PASSWORD = u"m.login.password"
    EMAIL_IDENTITY = u"m.login.email.identity"
    MSISDN = u"m.login.msisdn"
    RECAPTCHA = u"m.login.recaptcha"
    DUMMY = u"m.login.dummy"

    # Only for C/S API v1
    APPLICATION_SERVICE = u"m.login.application_service"
    SHARED_SECRET = u"org.matrix.login.shared_secret"


class EventTypes(object):
    """Well-known Matrix event type identifiers."""
    Member = "m.room.member"
    Create = "m.room.create"
    JoinRules = "m.room.join_rules"
    PowerLevels = "m.room.power_levels"
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"

    # These are used for validation
    Message = "m.room.message"
    Topic = "m.room.topic"
    Name = "m.room.name"


class RejectedReason(object):
    """Reasons an event may be rejected."""
    AUTH_ERROR = "auth_error"
    REPLACED = "replaced"
    NOT_ANCESTOR = "not_ancestor"


class RoomCreationPreset(object):
    """Preset names accepted by the room-creation API."""
    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"


class ThirdPartyEntityKind(object):
    """Kinds of third-party network entities that can be looked up."""
    USER = "user"
    LOCATION = "location"
./CrossVul/dataset_final_sorted/CWE-20/py/bad_100_0
crossvul-python_data_good_2816_2
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''

# Import Python Libs
from __future__ import absolute_import
import os
import sys
import copy
import errno
import signal
import hashlib
import logging
import weakref
from random import randint

# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.exceptions import SaltReqTimeoutError

import zmq
import zmq.error
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
    zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
try:
    import zmq.utils.monitor
    HAS_ZMQ_MONITOR = True
except ImportError:
    HAS_ZMQ_MONITOR = False

# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent

# Import third party libs
import salt.ext.six as six
# PKCS1_OAEP comes from PyCryptodome (the 'Cryptodome' namespace) when
# available; fall back to the legacy 'Crypto' namespace otherwise.
try:
    from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
    from Crypto.Cipher import PKCS1_OAEP

log = logging.getLogger(__name__)


class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
    '''
    Encapsulate sending routines to ZeroMQ.

    ZMQ Channels default to 'crypt=aes'
    '''
    # This class is only a singleton per minion/master pair
    # mapping of io_loop -> {key -> channel}
    instance_map = weakref.WeakKeyDictionary()

    def __new__(cls, opts, **kwargs):
        '''
        Only create one instance of channel per __key()
        '''
        # do we have any mapping for this io_loop
        io_loop = kwargs.get('io_loop')
        if io_loop is None:
            zmq.eventloop.ioloop.install()
            io_loop = tornado.ioloop.IOLoop.current()
        if io_loop not in cls.instance_map:
            cls.instance_map[io_loop] = weakref.WeakValueDictionary()
        loop_instance_map = cls.instance_map[io_loop]

        key = cls.__key(opts, **kwargs)
        obj = loop_instance_map.get(key)
        if obj is None:
            log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key))
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            obj = object.__new__(cls)
            obj.__singleton_init__(opts, **kwargs)
            loop_instance_map[key] = obj
            log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid()))
        else:
            log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
        return obj

    def __deepcopy__(self, memo):
        # Custom deepcopy: the io_loop and the message client wrap OS-level
        # resources (thread locks, sockets) that cannot be deep copied, so
        # they are skipped/recreated instead of copied.
        cls = self.__class__
        result = cls.__new__(cls, copy.deepcopy(self.opts, memo))  # pylint: disable=too-many-function-args
        memo[id(self)] = result
        for key in self.__dict__:
            if key in ('_io_loop',):
                continue
                # The _io_loop has a thread Lock which will fail to be deep
                # copied. Skip it because it will just be recreated on the
                # new copy.
            if key == 'message_client':
                # Recreate the message client because it will fail to be deep
                # copied. The reason is the same as the io_loop skip above.
                setattr(result, key,
                        AsyncReqMessageClientPool(result.opts,
                                                  args=(result.opts, self.master_uri,),
                                                  kwargs={'io_loop': self._io_loop}))
                continue
            setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
        return result

    @classmethod
    def __key(cls, opts, **kwargs):
        # Identity tuple used as the key for the per-io_loop singleton cache.
        return (opts['pki_dir'],     # where the keys are stored
                opts['id'],          # minion ID
                kwargs.get('master_uri', opts.get('master_uri')),  # master ID
                kwargs.get('crypt', 'aes'),  # TODO: use the same channel for crypt
                )

    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, **kwargs):
        pass

    # an init for the singleton instance to call
    def __singleton_init__(self, opts, **kwargs):
        self.opts = dict(opts)
        self.ttype = 'zeromq'

        # crypt defaults to 'aes'
        self.crypt = kwargs.get('crypt', 'aes')

        if 'master_uri' in kwargs:
            self.opts['master_uri'] = kwargs['master_uri']

        self._io_loop = kwargs.get('io_loop')
        if self._io_loop is None:
            zmq.eventloop.ioloop.install()
            self._io_loop = tornado.ioloop.IOLoop.current()

        if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since its a singleton
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
        self.message_client = AsyncReqMessageClientPool(self.opts,
                                                        args=(self.opts, self.master_uri,),
                                                        kwargs={'io_loop': self._io_loop})

    def __del__(self):
        '''
        Since the message_client creates sockets and assigns them to the IOLoop we have to
        specifically destroy them, since we aren't the only ones with references to the FDs
        '''
        if hasattr(self, 'message_client'):
            self.message_client.destroy()
        else:
            log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')

    @property
    def master_uri(self):
        # URI of the master's request server (tcp://ip:port)
        return self.opts['master_uri']

    def _package_load(self, load):
        # Wrap a load with the encryption scheme tag expected by the master.
        return {
            'enc': self.crypt,
            'load': load,
        }

    @tornado.gen.coroutine
    def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
        '''
        Send an encrypted load and decrypt a single RSA-sealed dict entry
        (ret[dictkey]) from the response.

        :param dict load: A load to send across the wire
        :param str dictkey: Key of the response entry to decrypt and return
        :param int tries: The number of times to retry before failure
        :param int timeout: The number of seconds to wait for a response
        '''
        if not self.auth.authenticated:
            # Return control back to the caller, continue when authentication succeeds
            yield self.auth.authenticate()
        # Return control to the caller. When send() completes, resume by populating ret with the Future.result
        ret = yield self.message_client.send(
            self._package_load(self.auth.crypticle.dumps(load)),
            timeout=timeout,
            tries=tries,
        )
        key = self.auth.get_keys()
        cipher = PKCS1_OAEP.new(key)
        if 'key' not in ret:
            # Reauth in the case our key is deleted on the master side.
            yield self.auth.authenticate()
            ret = yield self.message_client.send(
                self._package_load(self.auth.crypticle.dumps(load)),
                timeout=timeout,
                tries=tries,
            )
        # The master returns the session AES key sealed with our RSA pubkey;
        # unseal it and use it to decrypt the requested dict entry.
        aes = cipher.decrypt(ret['key'])
        pcrypt = salt.crypt.Crypticle(self.opts, aes)
        data = pcrypt.loads(ret[dictkey])
        if six.PY3:
            data = salt.transport.frame.decode_embedded_strs(data)
        raise tornado.gen.Return(data)

    @tornado.gen.coroutine
    def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a load across the wire, with encryption

        In case of authentication errors, try to renegotiate authentication
        and retry the method.

        Indeed, we can fail too early in case of a master restart during a
        minion state execution call

        :param dict load: A load to send across the wire
        :param int tries: The number of times to make before failure
        :param int timeout: The number of seconds on a response before failing
        '''
        @tornado.gen.coroutine
        def _do_transfer():
            # Yield control to the caller. When send() completes, resume by
            # populating data with the Future.result
            data = yield self.message_client.send(
                self._package_load(self.auth.crypticle.dumps(load)),
                timeout=timeout,
                tries=tries,
            )
            # we may not have always data
            # as for example for saltcall ret submission, this is a blind
            # communication, we do not subscribe to return events, we just
            # upload the results to the master
            if data:
                data = self.auth.crypticle.loads(data, raw)
            if six.PY3 and not raw:
                data = salt.transport.frame.decode_embedded_strs(data)
            raise tornado.gen.Return(data)
        if not self.auth.authenticated:
            # Return control back to the caller, resume when authentication succeeds
            yield self.auth.authenticate()
        try:
            ret = yield _do_transfer()
        except salt.crypt.AuthenticationError:
            # If auth error, re-authenticate (e.g. master key rotation) and retry once
            yield self.auth.authenticate()
            ret = yield _do_transfer()
        raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        '''
        Send a load across the wire in cleartext

        :param dict load: A load to send across the wire
        :param int tries: The number of times to make before failure
        :param int timeout: The number of seconds on a response before failing
        '''
        ret = yield self.message_client.send(
            self._package_load(load),
            timeout=timeout,
            tries=tries,
        )
        raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a request, return a future which will complete when we send the message
        '''
        if self.crypt == 'clear':
            ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
        else:
            ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
        raise tornado.gen.Return(ret)


class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin,
                            salt.transport.client.AsyncPubChannel):
    '''
    A transport channel backed by ZeroMQ for a Salt Publisher to use to publish
commands to connected minions ''' def __init__(self, opts, **kwargs): self.opts = opts self.ttype = 'zeromq' self.io_loop = kwargs.get('io_loop') if self.io_loop is None: zmq.eventloop.ioloop.install() self.io_loop = tornado.ioloop.IOLoop.current() self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest() self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop) self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() self._socket = self.context.socket(zmq.SUB) if self.opts['zmq_filtering']: # TODO: constants file for "broadcast" self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast') self._socket.setsockopt(zmq.SUBSCRIBE, self.hexid) else: self._socket.setsockopt(zmq.SUBSCRIBE, b'') self._socket.setsockopt(zmq.IDENTITY, salt.utils.to_bytes(self.opts['id'])) # TODO: cleanup all the socket opts stuff if hasattr(zmq, 'TCP_KEEPALIVE'): self._socket.setsockopt( zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] ) recon_delay = self.opts['recon_default'] if self.opts['recon_randomize']: recon_delay = randint(self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'] ) log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format( self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'], recon_delay) ) log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay)) self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay) if hasattr(zmq, 'RECONNECT_IVL_MAX'): log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format( self.opts['recon_default'] + self.opts['recon_max']) ) self._socket.setsockopt( zmq.RECONNECT_IVL_MAX, self.opts['recon_max'] ) if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 
'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self._socket.setsockopt(zmq.IPV4ONLY, 0) if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: self._monitor = ZeroMQSocketMonitor(self._socket) self._monitor.start_io_loop(self.io_loop) def destroy(self): if hasattr(self, '_monitor') and self._monitor is not None: self._monitor.stop() self._monitor = None if hasattr(self, '_stream'): # TODO: Optionally call stream.close() on newer pyzmq? Its broken on some self._stream.io_loop.remove_handler(self._stream.socket) self._stream.socket.close(0) elif hasattr(self, '_socket'): self._socket.close(0) if hasattr(self, 'context') and self.context.closed is False: self.context.term() def __del__(self): self.destroy() # TODO: this is the time to see if we are connected, maybe use the req channel to guess? @tornado.gen.coroutine def connect(self): if not self.auth.authenticated: yield self.auth.authenticate() self.publish_port = self.auth.creds['publish_port'] self._socket.connect(self.master_pub) @property def master_pub(self): ''' Return the master publish port ''' return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'], port=self.publish_port) @tornado.gen.coroutine def _decode_messages(self, messages): ''' Take the zmq messages, decrypt/decode them into a payload :param list messages: A list of messages to be decoded ''' messages_len = len(messages) # if it was one message, then its old style if messages_len == 1: payload = self.serial.loads(messages[0]) # 2 includes a header which says who should do it elif messages_len == 2: if messages[0] not in ('broadcast', self.hexid): log.debug('Publish received for not this minion: {0}'.format(messages[0])) raise tornado.gen.Return(None) payload = self.serial.loads(messages[1]) else: raise Exception(('Invalid number of messages ({0}) in zeromq pub' 'message from master').format(len(messages_len))) # Yield control back to the caller. 
When the payload has been decoded, assign # the decoded payload to 'ret' and resume operation ret = yield self._decode_payload(payload) raise tornado.gen.Return(ret) @property def stream(self): ''' Return the current zmqstream, creating one if necessary ''' if not hasattr(self, '_stream'): self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) return self._stream def on_recv(self, callback): ''' Register a callback for received messages (that we didn't initiate) :param func callback: A function which should be called when data is received ''' if callback is None: return self.stream.on_recv(None) @tornado.gen.coroutine def wrap_callback(messages): payload = yield self._decode_messages(messages) if payload is not None: callback(payload) return self.stream.on_recv(wrap_callback) class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel): def __init__(self, opts): salt.transport.server.ReqServerChannel.__init__(self, opts) self._closing = False def zmq_device(self): ''' Multiprocessing target for the zmq queue device ''' self.__setup_signals() salt.utils.appendproctitle('MWorkerQueue') self.context = zmq.Context(self.opts['worker_threads']) # Prepare the zeromq sockets self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts) self.clients = self.context.socket(zmq.ROUTER) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self.clients.setsockopt(zmq.IPV4ONLY, 0) self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: # Socket monitor shall be used the only for debug purposes so using threading doesn't look too bad here import threading self._monitor = ZeroMQSocketMonitor(self.clients) t = threading.Thread(target=self._monitor.start_poll) t.start() self.workers = self.context.socket(zmq.DEALER) if self.opts.get('ipc_mode', '') == 'tcp': self.w_uri = 
               'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_workers', 4515)
                )
        else:
            self.w_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'workers.ipc')
                )

        log.info('Setting up the master communication server')
        self.clients.bind(self.uri)

        self.workers.bind(self.w_uri)

        # Proxy client requests to worker processes until either socket closes.
        while True:
            if self.clients.closed or self.workers.closed:
                break
            try:
                zmq.device(zmq.QUEUE, self.clients, self.workers)
            except zmq.ZMQError as exc:
                # EINTR: interrupted by a signal; keep proxying
                if exc.errno == errno.EINTR:
                    continue
                raise exc
            except (KeyboardInterrupt, SystemExit):
                break

    def close(self):
        '''
        Cleanly shutdown the router socket
        '''
        if self._closing:
            return
        log.info('MWorkerQueue under PID %s is closing', os.getpid())
        self._closing = True
        # Stop monitors first, then sockets, then the context.
        if hasattr(self, '_monitor') and self._monitor is not None:
            self._monitor.stop()
            self._monitor = None
        if hasattr(self, '_w_monitor') and self._w_monitor is not None:
            self._w_monitor.stop()
            self._w_monitor = None
        if hasattr(self, 'clients') and self.clients.closed is False:
            self.clients.close()
        if hasattr(self, 'workers') and self.workers.closed is False:
            self.workers.close()
        if hasattr(self, 'stream'):
            self.stream.close()
        if hasattr(self, '_socket') and self._socket.closed is False:
            self._socket.close()
        if hasattr(self, 'context') and self.context.closed is False:
            self.context.term()

    def pre_fork(self, process_manager):
        '''
        Pre-fork we need to create the zmq router device

        :param func process_manager: An instance of salt.utils.process.ProcessManager
        '''
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        process_manager.add_process(self.zmq_device)

    def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router

        :param func payload_handler: A function to called to handle incoming payloads as
                                     they are picked up off the wire
        :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.context = zmq.Context(1)
        self._socket = self.context.socket(zmq.REP)
        if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            # Socket monitor shall be used the only for debug purposes so using threading doesn't look too bad here
            import threading
            self._w_monitor = ZeroMQSocketMonitor(self._socket)
            t = threading.Thread(target=self._w_monitor.start_poll)
            t.start()

        if self.opts.get('ipc_mode', '') == 'tcp':
            self.w_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_workers', 4515)
                )
        else:
            self.w_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'workers.ipc')
                )
        log.info('Worker binding to socket {0}'.format(self.w_uri))
        self._socket.connect(self.w_uri)

        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)

        self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
        self.stream.on_recv_stream(self.handle_message)

    @tornado.gen.coroutine
    def handle_message(self, stream, payload):
        '''
        Handle incoming messages from underylying TCP streams

        :stream ZMQStream stream: A ZeroMQ stream.
        See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html

        :param dict payload: A payload to process
        '''
        try:
            payload = self.serial.loads(payload[0])
            payload = self._decode_payload(payload)
        except Exception as exc:
            exc_type = type(exc).__name__
            if exc_type == 'AuthenticationError':
                log.debug(
                    'Minion failed to auth to master. Since the payload is '
                    'encrypted, it is not known which minion failed to '
                    'authenticate. It is likely that this is a transient '
                    'failure due to the master rotating its public key.'
                )
            else:
                log.error('Bad load from minion: %s: %s', exc_type, exc)
            stream.send(self.serial.dumps('bad load'))
            raise tornado.gen.Return()

        # TODO helper functions to normalize payload?
        if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
            log.error('payload and load must be a dict. Payload was: {0} and load was {1}'.format(payload, payload.get('load')))
            stream.send(self.serial.dumps('payload and load must be a dict'))
            raise tornado.gen.Return()

        # Security guard: reject minion ids containing a null byte, which
        # could otherwise be used for path traversal in key handling.
        try:
            id_ = payload['load'].get('id', '')
            if '\0' in id_:
                log.error('Payload contains an id with a null byte: %s', payload)
                stream.send(self.serial.dumps('bad load: id contains a null byte'))
                raise tornado.gen.Return()
        except TypeError:
            log.error('Payload contains non-string id: %s', payload)
            stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
            raise tornado.gen.Return()

        # intercept the "_auth" commands, since the main daemon shouldn't know
        # anything about our key auth
        if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
            stream.send(self.serial.dumps(self._auth(payload['load'])))
            raise tornado.gen.Return()

        # TODO: test
        try:
            # Take the payload_handler function that was registered when we created the channel
            # and call it, returning control to the caller until it completes
            ret, req_opts = yield self.payload_handler(payload)
        except Exception as e:
            # always attempt to return an error to the minion
            stream.send('Some exception handling minion payload')
            log.error('Some exception handling a payload from minion', exc_info=True)
            raise tornado.gen.Return()

        req_fun = req_opts.get('fun', 'send')
        if req_fun == 'send_clear':
            stream.send(self.serial.dumps(ret))
        elif req_fun == 'send':
            stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
        elif req_fun == 'send_private':
            stream.send(self.serial.dumps(self._encrypt_private(ret,
                                                                req_opts['key'],
                                                                req_opts['tgt'],
                                                                )))
        else:
            log.error('Unknown req_fun {0}'.format(req_fun))
            # always attempt to return an error to the minion
            stream.send('Server-side exception handling payload')
        raise tornado.gen.Return()

    def __setup_signals(self):
        signal.signal(signal.SIGINT, self._handle_signals)
        signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):
        # Log which signal triggered the shutdown, then close and exit.
        msg = '{0} received a '.format(self.__class__.__name__)
        if signum == signal.SIGINT:
            msg += 'SIGINT'
        elif signum == signal.SIGTERM:
            msg += 'SIGTERM'
        msg += '. Exiting'
        log.debug(msg)
        self.close()
        # NOTE(review): relies on salt.defaults.exitcodes being importable via
        # the salt package -- confirm it is pulled in by one of the salt imports
        sys.exit(salt.defaults.exitcodes.EX_OK)


def _set_tcp_keepalive(zmq_socket, opts):
    '''
    Ensure that TCP keepalives are set as specified in "opts".

    Warning: Failure to set TCP keepalives on the salt-master can result in
    not detecting the loss of a minion when the connection is lost or when
    it's host has been terminated without first closing the socket.
    Salt's Presence System depends on this connection status to know if a minion
    is "present".

    Warning: Failure to set TCP keepalives on minions can result in frequent or
    unexpected disconnects!
    '''
    if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
        if 'tcp_keepalive' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
            )
        if 'tcp_keepalive_idle' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
            )
        if 'tcp_keepalive_cnt' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
            )
        if 'tcp_keepalive_intvl' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
            )


class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
    '''
    Encapsulate synchronous operations for a publisher channel
    '''
    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)  # TODO: in init?
        self.ckminions = salt.utils.minions.CkMinions(self.opts)

    def connect(self):
        # Nothing to connect on the server side; return a sleeping future so
        # callers that yield on connect() behave uniformly.
        return tornado.gen.sleep(5)

    def _publish_daemon(self):
        '''
        Bind to the interface specified in the configuration file
        '''
        salt.utils.appendproctitle(self.__class__.__name__)
        # Set up the context
        context = zmq.Context(1)
        # Prepare minion publish socket
        pub_sock = context.socket(zmq.PUB)
        _set_tcp_keepalive(pub_sock, self.opts)
        # if 2.1 >= zmq < 3.0, we only have one HWM setting
        try:
            pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
        # in zmq >= 3.0, there are separate send and receive HWM settings
        except AttributeError:
            # Set the High Water Marks. For more information on HWM, see:
            # http://api.zeromq.org/4-1:zmq-setsockopt
            pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
            pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
        if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
            # IPv6 sockets work for both IPv6 and IPv4 addresses
            pub_sock.setsockopt(zmq.IPV4ONLY, 0)
        pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
        pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
        # Prepare minion pull socket
        pull_sock = context.socket(zmq.PULL)
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_publish_pull', 4514)
                )
        else:
            pull_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
                )
        salt.utils.zeromq.check_ipc_path_max_len(pull_uri)

        # Start the minion command publisher
        log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
        pub_sock.bind(pub_uri)

        # Securely create socket: restrict the IPC socket's permissions via
        # umask while binding so other local users cannot inject publishes
        log.info('Starting the Salt Puller on {0}'.format(pull_uri))
        old_umask = os.umask(0o177)
        try:
            pull_sock.bind(pull_uri)
        finally:
            os.umask(old_umask)
        try:
            while True:
                # Catch and handle EINTR from when this process is sent
                # SIGUSR1 gracefully so we don't choke and die horribly
                try:
                    package = pull_sock.recv()
                    unpacked_package = salt.payload.unpackage(package)
                    if six.PY3:
                        unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
                    payload = unpacked_package['payload']
                    if self.opts['zmq_filtering']:
                        # if you have a specific topic list, use that
                        if 'topic_lst' in unpacked_package:
                            for topic in unpacked_package['topic_lst']:
                                # zmq filters are substring match, hash the topic
                                # to avoid collisions
                                htopic = hashlib.sha1(topic).hexdigest()
                                pub_sock.send(htopic, flags=zmq.SNDMORE)
                                pub_sock.send(payload)
                            # otherwise its a broadcast
                        else:
                            # TODO: constants file for "broadcast"
                            pub_sock.send('broadcast', flags=zmq.SNDMORE)
                            pub_sock.send(payload)
                    else:
                        pub_sock.send(payload)
                except zmq.ZMQError as exc:
                    if exc.errno == errno.EINTR:
                        continue
                    raise exc

        except KeyboardInterrupt:
            # Cleanly close the sockets if we're shutting down
            if pub_sock.closed is False:
                pub_sock.setsockopt(zmq.LINGER, 1)
                pub_sock.close()
            if pull_sock.closed is False:
                pull_sock.setsockopt(zmq.LINGER, 1)
                pull_sock.close()
            if context.closed is False:
                context.term()

    def pre_fork(self, process_manager):
        '''
        Do anything necessary pre-fork. Since this is on the master side this will
        primarily be used to create IPC channels and create our daemon process to
        do the actual publishing

        :param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
        '''
        process_manager.add_process(self._publish_daemon)

    def publish(self, load):
        '''
        Publish "load" to minions

        :param dict load: A load to be sent across the wire to minions
        '''
        payload = {'enc': 'aes'}

        crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
        payload['load'] = crypticle.dumps(load)
        if self.opts['sign_pub_messages']:
            master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
            log.debug("Signing data packet")
            payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
        # Send 0MQ to the publisher
        context = zmq.Context(1)
        pub_sock = context.socket(zmq.PUSH)
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_publish_pull', 4514)
                )
        else:
            pull_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
                )
        pub_sock.connect(pull_uri)
        int_payload = {'payload': self.serial.dumps(payload)}

        # add some targeting stuff for lists only (for now)
        if load['tgt_type'] == 'list':
            int_payload['topic_lst'] = load['tgt']

        # If zmq_filtering is enabled, target matching has to happen master side
        match_targets = ["pcre", "glob", "list"]
        if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
            # Fetch a list of minions that match
            match_ids = self.ckminions.check_minions(load['tgt'],
                                                     tgt_type=load['tgt_type'])
            log.debug("Publish Side Match: {0}".format(match_ids))
            # Send list of minions thru so zmq can target them
            int_payload['topic_lst'] = match_ids
        pub_sock.send(self.serial.dumps(int_payload))
        pub_sock.close()
        context.term()


class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
    '''
    Wrapper class of AsyncReqMessageClient to avoid blocking waiting while writing data to socket.
''' def __init__(self, opts, args=None, kwargs=None): super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs) def __del__(self): self.destroy() def destroy(self): for message_client in self.message_clients: message_client.destroy() self.message_clients = [] def send(self, *args, **kwargs): message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue)) return message_clients[0].send(*args, **kwargs) # TODO: unit tests! class AsyncReqMessageClient(object): ''' This class wraps the underylying zeromq REQ socket and gives a future-based interface to sending and recieving messages. This works around the primary limitation of serialized send/recv on the underlying socket by queueing the message sends in this class. In the future if we decide to attempt to multiplex we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial ''' def __init__(self, opts, addr, linger=0, io_loop=None): ''' Create an asynchronous message client :param dict opts: The salt opts dictionary :param str addr: The interface IP address to bind to :param int linger: The number of seconds to linger on a ZMQ socket. See http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER] :param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop] ''' self.opts = opts self.addr = addr self.linger = linger if io_loop is None: zmq.eventloop.ioloop.install() tornado.ioloop.IOLoop.current() else: self.io_loop = io_loop self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() # wire up sockets self._init_socket() self.send_queue = [] # mapping of message -> future self.send_future_map = {} self.send_timeout_map = {} # message -> timeout # TODO: timeout all in-flight sessions, or error def destroy(self): if hasattr(self, 'stream') and self.stream is not None: # TODO: Optionally call stream.close() on newer pyzmq? It is broken on some. 
if self.stream.socket: self.stream.socket.close() self.stream.io_loop.remove_handler(self.stream.socket) # set this to None, more hacks for messed up pyzmq self.stream.socket = None self.stream = None self.socket.close() if self.context.closed is False: self.context.term() def __del__(self): self.destroy() def _init_socket(self): if hasattr(self, 'stream'): self.stream.close() # pylint: disable=E0203 self.socket.close() # pylint: disable=E0203 del self.stream del self.socket self.socket = self.context.socket(zmq.REQ) # socket options if hasattr(zmq, 'RECONNECT_IVL_MAX'): self.socket.setsockopt( zmq.RECONNECT_IVL_MAX, 5000 ) _set_tcp_keepalive(self.socket, self.opts) if self.addr.startswith('tcp://['): # Hint PF type if bracket enclosed IPv6 address if hasattr(zmq, 'IPV6'): self.socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, 'IPV4ONLY'): self.socket.setsockopt(zmq.IPV4ONLY, 0) self.socket.linger = self.linger self.socket.connect(self.addr) self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop) @tornado.gen.coroutine def _internal_send_recv(self): while len(self.send_queue) > 0: message = self.send_queue[0] future = self.send_future_map.get(message, None) if future is None: # Timedout del self.send_queue[0] continue # send def mark_future(msg): if not future.done(): data = self.serial.loads(msg[0]) future.set_result(data) self.stream.on_recv(mark_future) self.stream.send(message) try: ret = yield future except: # pylint: disable=W0702 self._init_socket() # re-init the zmq socket (no other way in zmq) del self.send_queue[0] continue del self.send_queue[0] self.send_future_map.pop(message, None) self.remove_message_timeout(message) def remove_message_timeout(self, message): if message not in self.send_timeout_map: return timeout = self.send_timeout_map.pop(message, None) if timeout is not None: # Hasn't been already timedout self.io_loop.remove_timeout(timeout) def timeout_message(self, message): ''' Handle a message timeout by removing it 
from the sending queue and informing the caller :raises: SaltReqTimeoutError ''' future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.debug('SaltReqTimeoutError, retrying. ({0}/{1})'.format(future.attempts, future.tries)) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError('Message timed out')) def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False): ''' Return a future which will be completed when the message has a response ''' if future is None: future = tornado.concurrent.Future() future.tries = tries future.attempts = 0 future.timeout = timeout # if a future wasn't passed in, we need to serialize the message message = self.serial.dumps(message) if callback is not None: def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) # Add this future to the mapping self.send_future_map[message] = future if self.opts.get('detect_mode') is True: timeout = 1 if timeout is not None: send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message) self.send_timeout_map[message] = send_timeout if len(self.send_queue) == 0: self.io_loop.spawn_callback(self._internal_send_recv) self.send_queue.append(message) return future class ZeroMQSocketMonitor(object): __EVENT_MAP = None def __init__(self, socket): ''' Create ZMQ monitor sockets More information: http://api.zeromq.org/4-0:zmq-socket-monitor ''' self._socket = socket self._monitor_socket = self._socket.get_monitor_socket() self._monitor_stream = None def start_io_loop(self, io_loop): log.trace("Event monitor start!") self._monitor_stream = 
zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop) self._monitor_stream.on_recv(self.monitor_callback) def start_poll(self): log.trace("Event monitor start!") try: while self._monitor_socket is not None and self._monitor_socket.poll(): msg = self._monitor_socket.recv_multipart() self.monitor_callback(msg) except (AttributeError, zmq.error.ContextTerminated): # We cannot log here because we'll get an interrupted system call in trying # to flush the logging buffer as we terminate pass @property def event_map(self): if ZeroMQSocketMonitor.__EVENT_MAP is None: event_map = {} for name in dir(zmq): if name.startswith('EVENT_'): value = getattr(zmq, name) event_map[value] = name ZeroMQSocketMonitor.__EVENT_MAP = event_map return ZeroMQSocketMonitor.__EVENT_MAP def monitor_callback(self, msg): evt = zmq.utils.monitor.parse_monitor_message(msg) evt['description'] = self.event_map[evt['event']] log.debug("ZeroMQ event: {0}".format(evt)) if evt['event'] == zmq.EVENT_MONITOR_STOPPED: self.stop() def stop(self): if self._socket is None: return self._socket.disable_monitor() self._socket = None self._monitor_socket = None if self._monitor_stream is not None: self._monitor_stream.close() self._monitor_stream = None log.trace("Event monitor done!")
./CrossVul/dataset_final_sorted/CWE-20/py/good_2816_2
crossvul-python_data_bad_3660_1
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""The security groups extension."""

import urllib
from xml.dom import minidom

import webob
from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova import quota
from nova import utils


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
QUOTAS = quota.QUOTAS
authorize = extensions.extension_authorizer('compute', 'security_groups')


def make_rule(elem):
    # Populate the XML template element for a single security group rule:
    # scalar attributes/subelements plus nested group and ip_range nodes.
    elem.set('id')
    elem.set('parent_group_id')

    proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    proto.text = 'ip_protocol'

    from_port = xmlutil.SubTemplateElement(elem, 'from_port')
    from_port.text = 'from_port'

    to_port = xmlutil.SubTemplateElement(elem, 'to_port')
    to_port.text = 'to_port'

    group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name = xmlutil.SubTemplateElement(group, 'name')
    name.text = 'name'
    tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
    tenant_id.text = 'tenant_id'

    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
                                          selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'


def make_sg(elem):
    # Populate the XML template element for a security group, including
    # its nested list of rules (built via make_rule).
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    rules = xmlutil.SubTemplateElement(elem, 'rules')
    rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
    make_rule(rule)


# Default XML namespace map for the security group serializers
sg_nsmap = {None: wsgi.XMLNS_V11}


class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        # Template for a single <security_group_rule> response document
        root = xmlutil.TemplateElement('security_group_rule',
                                       selector='security_group_rule')
        make_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        # Template for a single <security_group> response document
        root = xmlutil.TemplateElement('security_group',
                                       selector='security_group')
        make_sg(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        # Template for a <security_groups> list response document
        root = xmlutil.TemplateElement('security_groups')
        elem = xmlutil.SubTemplateElement(root, 'security_group',
                                          selector='security_groups')
        make_sg(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """
    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom,
                                               'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node,
                                                     "description")
            if desc_node:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}


class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
""" def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} def _extract_security_group_rule(self, node): """Marshal the security group rule attribute of a parsed request""" sg_rule = {} sg_rule_node = self.find_first_child_named(node, 'security_group_rule') if sg_rule_node is not None: ip_protocol_node = self.find_first_child_named(sg_rule_node, "ip_protocol") if ip_protocol_node is not None: sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) from_port_node = self.find_first_child_named(sg_rule_node, "from_port") if from_port_node is not None: sg_rule['from_port'] = self.extract_text(from_port_node) to_port_node = self.find_first_child_named(sg_rule_node, "to_port") if to_port_node is not None: sg_rule['to_port'] = self.extract_text(to_port_node) parent_group_id_node = self.find_first_child_named(sg_rule_node, "parent_group_id") if parent_group_id_node is not None: sg_rule['parent_group_id'] = self.extract_text( parent_group_id_node) group_id_node = self.find_first_child_named(sg_rule_node, "group_id") if group_id_node is not None: sg_rule['group_id'] = self.extract_text(group_id_node) cidr_node = self.find_first_child_named(sg_rule_node, "cidr") if cidr_node is not None: sg_rule['cidr'] = self.extract_text(cidr_node) return sg_rule class SecurityGroupControllerBase(object): """Base class for Security Group controllers.""" def __init__(self): self.compute_api = compute.API() self.sgh = importutils.import_object(FLAGS.security_group_handler) def _format_security_group_rule(self, context, rule): sg_rule = {} sg_rule['id'] = rule.id sg_rule['parent_group_id'] = rule.parent_group_id sg_rule['ip_protocol'] = rule.protocol sg_rule['from_port'] = rule.from_port sg_rule['to_port'] = rule.to_port sg_rule['group'] = {} sg_rule['ip_range'] = {} if rule.group_id: source_group 
= db.security_group_get(context, rule.group_id) sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: sg_rule['ip_range'] = {'cidr': rule.cidr} return sg_rule def _format_security_group(self, context, group): security_group = {} security_group['id'] = group.id security_group['description'] = group.description security_group['name'] = group.name security_group['tenant_id'] = group.project_id security_group['rules'] = [] for rule in group.rules: security_group['rules'] += [self._format_security_group_rule( context, rule)] return security_group class SecurityGroupController(SecurityGroupControllerBase): """The Security group API controller for the OpenStack API.""" def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: msg = _("Security group id should be integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) return security_group @wsgi.serializers(xml=SecurityGroupTemplate) def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) if db.security_group_in_use(context, security_group.id): msg = _("Security group is still in use") raise exc.HTTPBadRequest(explanation=msg) # Get reservations try: reservations = QUOTAS.reserve(context, security_groups=-1) except Exception: reservations = None LOG.exception(_("Failed to update usages deallocating " "security group")) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) 
self.sgh.trigger_security_group_destroy_refresh( context, security_group.id) # Commit the reservations if reservations: QUOTAS.commit(context, reservations) return webob.Response(status_int=202) @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req): """Returns a list of security groups""" context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) groups = db.security_group_get_by_project(context, context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @wsgi.serializers(xml=SecurityGroupTemplate) @wsgi.deserializers(xml=SecurityGroupXMLDeserializer) def create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() security_group = body.get('security_group', None) if security_group is None: raise exc.HTTPUnprocessableEntity() group_name = security_group.get('name', None) group_description = security_group.get('description', None) self._validate_security_group_property(group_name, "name") self._validate_security_group_property(group_description, "description") group_name = group_name.strip() group_description = group_description.strip() try: reservations = QUOTAS.reserve(context, security_groups=1) except exception.OverQuota: msg = _("Quota exceeded, too many security groups.") raise exc.HTTPBadRequest(explanation=msg) try: LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('Security group %s already exists') % group_name raise exc.HTTPBadRequest(explanation=msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': 
group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) # Commit the reservation QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) return {'security_group': self._format_security_group(context, group_ref)} def _validate_security_group_property(self, value, typ): """ typ will be either 'name' or 'description', depending on the caller """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) if not val: msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) class SecurityGroupRulesController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupRuleTemplate) @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer) def create(self, req, body): context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() if not 'security_group_rule' in body: raise exc.HTTPUnprocessableEntity() self.compute_api.ensure_default_security_group(context) sg_rule = body['security_group_rule'] parent_group_id = sg_rule.get('parent_group_id', None) try: parent_group_id = int(parent_group_id) security_group = db.security_group_get(context, parent_group_id) except ValueError: msg = _("Parent group id is not integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Security group (%s) not found") % parent_group_id raise exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), 
parent_group_id=sg_rule.get('parent_group_id'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a " "valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): msg = _('This rule already exists in group %s') % parent_group_id raise exc.HTTPBadRequest(explanation=msg) count = QUOTAS.count(context, 'security_group_rules', parent_group_id) try: QUOTAS.limit_check(context, security_group_rules=count + 1) except exception.OverQuota: msg = _("Quota exceeded, too many security group rules.") raise exc.HTTPBadRequest(explanation=msg) security_group_rule = db.security_group_rule_create(context, values) self.sgh.trigger_security_group_rule_create_refresh( context, [security_group_rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. 
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return True return False def _rule_args_to_dict(self, context, to_port=None, from_port=None, parent_group_id=None, ip_protocol=None, cidr=None, group_id=None): values = {} if group_id is not None: try: parent_group_id = int(parent_group_id) group_id = int(group_id) except ValueError: msg = _("Parent or group id is not integer") raise exception.InvalidInput(reason=msg) values['group_id'] = group_id #check if groupId exists db.security_group_get(context, group_id) elif cidr: # If this fails, it throws an exception. This is what we want. try: cidr = urllib.unquote(cidr).decode() except Exception: raise exception.InvalidCidr(cidr=cidr) if not utils.is_valid_cidr(cidr): # Raise exception for non-valid address raise exception.InvalidCidr(cidr=cidr) values['cidr'] = cidr else: values['cidr'] = '0.0.0.0/0' if group_id: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and from_port > to_port): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def delete(self, req, id): context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) try: id = int(id) rule = db.security_group_rule_get(context, id) except ValueError: msg = _("Rule id is not integer") 
raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound: msg = _("Rule (%s) not found") % id raise exc.HTTPNotFound(explanation=msg) group_id = rule.parent_group_id self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get(context, group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) db.security_group_rule_destroy(context, rule['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, [rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return webob.Response(status_int=202) class ServerSecurityGroupController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) try: instance = self.compute_api.get(context, server_id) groups = db.security_group_get_by_instance(context, instance['id']) except exception.ApiError, e: raise webob.exc.HTTPBadRequest(explanation=e.message) except exception.NotAuthorized, e: raise webob.exc.HTTPUnauthorized() result = [self._format_security_group(context, group) for group in groups] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} class SecurityGroupActionController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SecurityGroupActionController, self).__init__(*args, **kwargs) self.compute_api = compute.API() self.sgh = importutils.import_object(FLAGS.security_group_handler) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['addSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except 
KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.add_security_group(context, instance, group_name) self.sgh.trigger_instance_add_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['removeSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.remove_security_group(context, instance, group_name) self.sgh.trigger_instance_remove_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) class Security_groups(extensions.ExtensionDescriptor): """Security group support""" name = "SecurityGroups" alias = "security_groups" namespace = 
"http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" updated = "2011-07-21T00:00:00+00:00" def get_controller_extensions(self): controller = SecurityGroupActionController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def get_resources(self): resources = [] res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController()) resources.append(res) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController()) resources.append(res) res = extensions.ResourceExtension( 'os-security-groups', controller=ServerSecurityGroupController(), parent=dict(member_name='server', collection_name='servers')) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3660_1
crossvul-python_data_good_3768_1
import os
import re
import urllib

from django.conf import settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.test import TestCase
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.http import QueryDict


class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def setUp(self):
        # Pin language and template settings, remembering the originals so
        # tearDown can restore them.
        self.old_LANGUAGES = settings.LANGUAGES
        self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
        settings.LANGUAGES = (('en', 'English'),)
        settings.LANGUAGE_CODE = 'en'
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )

    def tearDown(self):
        settings.LANGUAGES = self.old_LANGUAGES
        settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS

    def login(self, password='password'):
        """Log the fixture user in and assert the login succeeded."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
        self.assertTrue(SESSION_KEY in self.client.session)


class PasswordResetTest(AuthViewsTestCase):

    def test_email_not_found(self):
        "Error is raised if the provided email address isn't currently registered"
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertContains(response, "That e-mail address doesn&#39;t have an associated user account")
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        def test_host_poisoning():
            self.client.post('/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
        self.assertRaises(SuspiciousOperation, test_host_poisoning)
        self.assertEqual(len(mail.outbox), 0)

    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        def test_host_poisoning():
            self.client.post('/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
        self.assertRaises(SuspiciousOperation, test_host_poisoning)
        self.assertEqual(len(mail.outbox), 0)

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract (full URL, path) of the reset link from the email body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Please enter your new password" in response.content)

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0"*4) + path[-1]

        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456-1-1/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0"*4) + path[-1]

        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2':' anewpassword'})
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # It redirects us to a 'complete' page:
        self.assertEqual(response.status_code, 302)
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))

        # Check we can't use the link again
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2':' x'})
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The two password fields didn&#39;t match" in response.content)


class ChangePasswordTest(AuthViewsTestCase):

    def fail_login(self, password='password'):
        """Attempt a login that must fail (wrong password)."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password
            }
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Please enter a correct username and password. Note that both fields are case-sensitive." in response.content)

    def logout(self):
        response = self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
            }
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Your old password was entered incorrectly. Please enter it again." in response.content)

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
            }
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The two password fields didn&#39;t match." in response.content)

    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))
        self.fail_login()
        self.login(password='password1')


class LoginTest(AuthViewsTestCase):

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('django.contrib.auth.views.login'))
        self.assertEqual(response.status_code, 200)
        if Site._meta.installed:
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                     'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        login_url = reverse('django.contrib.auth.views.login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urllib.quote(bad_url)
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
                }
            )
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                     '/view/?param=https://example.com',
                     '/view?param=ftp://exampel.com',
                     'view/?param=//example.com',
                     'https:///',
                     '//testserver/',
                     '/url%20with%20spaces/',  # see ticket #12534
                     ):
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urllib.quote(good_url)
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
                }
            )
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)


class LoginURLSettings(AuthViewsTestCase):
    urls = 'django.contrib.auth.tests.urls'

    def setUp(self):
        super(LoginURLSettings, self).setUp()
        self.old_LOGIN_URL = settings.LOGIN_URL

    def tearDown(self):
        super(LoginURLSettings, self).tearDown()
        settings.LOGIN_URL = self.old_LOGIN_URL

    def get_login_required_url(self, login_url):
        """Hit a login_required view and return the redirect Location."""
        settings.LOGIN_URL = login_url
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        return response['Location']

    def test_standard_login_url(self):
        login_url = '/login/'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = '/login_required/'
        self.assertEqual(login_required_url,
             'http://testserver%s?%s' % (login_url, querystring.urlencode('/')))

    def test_remote_login_url(self):
        login_url = 'http://remote.example.com/login'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url,
                         '%s?%s' % (login_url, querystring.urlencode('/')))

    def test_https_login_url(self):
        login_url = 'https:///login/'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url,
                         '%s?%s' % (login_url, querystring.urlencode('/')))

    def test_login_url_with_querystring(self):
        login_url = '/login/?pretty=1'
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict('pretty=1', mutable=True)
        querystring['next'] = '/login_required/'
        self.assertEqual(login_required_url,
             'http://testserver/login/?%s' % querystring.urlencode('/'))

    def test_remote_login_url_with_next_querystring(self):
        login_url = 'http://remote.example.com/login/'
        login_required_url = self.get_login_required_url('%s?next=/default/' %
                                                         login_url)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = 'http://testserver/login_required/'
        self.assertEqual(login_required_url,
                         '%s?%s' % (login_url, querystring.urlencode('/')))


class LogoutTest(AuthViewsTestCase):
    urls = 'django.contrib.auth.tests.urls'

    def confirm_logged_out(self):
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertEqual(200, response.status_code)
        self.assertTrue('Logged out' in response.content)
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))

        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))

        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        logout_url = reverse('django.contrib.auth.views.logout')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urllib.quote(bad_url)
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/',  # see ticket #12534
                         ):
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urllib.quote(good_url)
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()
./CrossVul/dataset_final_sorted/CWE-20/py/good_3768_1
crossvul-python_data_good_1740_2
"""Tornado handlers for the contents web service. Preliminary documentation at https://github.com/ipython/ipython/wiki/IPEP-27%3A-Contents-Service """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json from tornado import gen, web from notebook.utils import url_path_join, url_escape from jupyter_client.jsonutil import date_default from notebook.base.handlers import ( IPythonHandler, APIHandler, json_errors, path_regex, ) def sort_key(model): """key function for case-insensitive sort by name and type""" iname = model['name'].lower() type_key = { 'directory' : '0', 'notebook' : '1', 'file' : '2', }.get(model['type'], '9') return u'%s%s' % (type_key, iname) def validate_model(model, expect_content): """ Validate a model returned by a ContentsManager method. If expect_content is True, then we expect non-null entries for 'content' and 'format'. """ required_keys = { "name", "path", "type", "writable", "created", "last_modified", "mimetype", "content", "format", } missing = required_keys - set(model.keys()) if missing: raise web.HTTPError( 500, u"Missing Model Keys: {missing}".format(missing=missing), ) maybe_none_keys = ['content', 'format'] if expect_content: errors = [key for key in maybe_none_keys if model[key] is None] if errors: raise web.HTTPError( 500, u"Keys unexpectedly None: {keys}".format(keys=errors), ) else: errors = { key: model[key] for key in maybe_none_keys if model[key] is not None } if errors: raise web.HTTPError( 500, u"Keys unexpectedly not None: {keys}".format(keys=errors), ) class ContentsHandler(APIHandler): SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE') def location_url(self, path): """Return the full URL location of a file. Parameters ---------- path : unicode The API path of the file, such as "foo/bar.txt". 
""" return url_escape(url_path_join( self.base_url, 'api', 'contents', path )) def _finish_model(self, model, location=True): """Finish a JSON request with a model, setting relevant headers, etc.""" if location: location = self.location_url(model['path']) self.set_header('Location', location) self.set_header('Last-Modified', model['last_modified']) self.set_header('Content-Type', 'application/json') self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors @gen.coroutine def get(self, path=''): """Return a model for a file or directory. A directory model contains a list of models (without content) of the files and directories it contains. """ path = path or '' type = self.get_query_argument('type', default=None) if type not in {None, 'directory', 'file', 'notebook'}: raise web.HTTPError(400, u'Type %r is invalid' % type) format = self.get_query_argument('format', default=None) if format not in {None, 'text', 'base64'}: raise web.HTTPError(400, u'Format %r is invalid' % format) content = self.get_query_argument('content', default='1') if content not in {'0', '1'}: raise web.HTTPError(400, u'Content %r is invalid' % content) content = int(content) model = yield gen.maybe_future(self.contents_manager.get( path=path, type=type, format=format, content=content, )) if model['type'] == 'directory' and content: # group listing by type, then by name (case-insensitive) # FIXME: sorting should be done in the frontends model['content'].sort(key=sort_key) validate_model(model, expect_content=content) self._finish_model(model, location=False) @web.authenticated @json_errors @gen.coroutine def patch(self, path=''): """PATCH renames a file or directory without re-uploading content.""" cm = self.contents_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, u'JSON body missing') model = yield gen.maybe_future(cm.update(model, path)) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def 
_copy(self, copy_from, copy_to=None): """Copy a file, optionally specifying a target directory.""" self.log.info(u"Copying {copy_from} to {copy_to}".format( copy_from=copy_from, copy_to=copy_to or '', )) model = yield gen.maybe_future(self.contents_manager.copy(copy_from, copy_to)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _upload(self, model, path): """Handle upload of a new file to path""" self.log.info(u"Uploading file to %s", path) model = yield gen.maybe_future(self.contents_manager.new(model, path)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _new_untitled(self, path, type='', ext=''): """Create a new, empty untitled entity""" self.log.info(u"Creating new %s in %s", type or 'file', path) model = yield gen.maybe_future(self.contents_manager.new_untitled(path=path, type=type, ext=ext)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _save(self, model, path): """Save an existing file.""" self.log.info(u"Saving file at %s", path) model = yield gen.maybe_future(self.contents_manager.save(model, path)) validate_model(model, expect_content=False) self._finish_model(model) @web.authenticated @json_errors @gen.coroutine def post(self, path=''): """Create a new file in the specified path. POST creates new files. The server always decides on the name. POST /api/contents/path New untitled, empty file or directory. 
POST /api/contents/path with body {"copy_from" : "/path/to/OtherNotebook.ipynb"} New copy of OtherNotebook in path """ cm = self.contents_manager if cm.file_exists(path): raise web.HTTPError(400, "Cannot POST to files, use PUT instead.") if not cm.dir_exists(path): raise web.HTTPError(404, "No such directory: %s" % path) model = self.get_json_body() if model is not None: copy_from = model.get('copy_from') ext = model.get('ext', '') type = model.get('type', '') if copy_from: yield self._copy(copy_from, path) else: yield self._new_untitled(path, type=type, ext=ext) else: yield self._new_untitled(path) @web.authenticated @json_errors @gen.coroutine def put(self, path=''): """Saves the file in the location specified by name and path. PUT is very similar to POST, but the requester specifies the name, whereas with POST, the server picks the name. PUT /api/contents/path/Name.ipynb Save notebook at ``path/Name.ipynb``. Notebook structure is specified in `content` key of JSON request body. If content is not specified, create a new empty notebook. 
""" model = self.get_json_body() if model: if model.get('copy_from'): raise web.HTTPError(400, "Cannot copy with PUT, only POST") exists = yield gen.maybe_future(self.contents_manager.file_exists(path)) if exists: yield gen.maybe_future(self._save(model, path)) else: yield gen.maybe_future(self._upload(model, path)) else: yield gen.maybe_future(self._new_untitled(path)) @web.authenticated @json_errors @gen.coroutine def delete(self, path=''): """delete a file in the given path""" cm = self.contents_manager self.log.warn('delete %s', path) yield gen.maybe_future(cm.delete(path)) self.set_status(204) self.finish() class CheckpointsHandler(APIHandler): SUPPORTED_METHODS = ('GET', 'POST') @web.authenticated @json_errors @gen.coroutine def get(self, path=''): """get lists checkpoints for a file""" cm = self.contents_manager checkpoints = yield gen.maybe_future(cm.list_checkpoints(path)) data = json.dumps(checkpoints, default=date_default) self.finish(data) @web.authenticated @json_errors @gen.coroutine def post(self, path=''): """post creates a new checkpoint""" cm = self.contents_manager checkpoint = yield gen.maybe_future(cm.create_checkpoint(path)) data = json.dumps(checkpoint, default=date_default) location = url_path_join(self.base_url, 'api/contents', path, 'checkpoints', checkpoint['id']) self.set_header('Location', url_escape(location)) self.set_status(201) self.finish(data) class ModifyCheckpointsHandler(APIHandler): SUPPORTED_METHODS = ('POST', 'DELETE') @web.authenticated @json_errors @gen.coroutine def post(self, path, checkpoint_id): """post restores a file from a checkpoint""" cm = self.contents_manager yield gen.maybe_future(cm.restore_checkpoint(checkpoint_id, path)) self.set_status(204) self.finish() @web.authenticated @json_errors @gen.coroutine def delete(self, path, checkpoint_id): """delete clears a checkpoint for a given file""" cm = self.contents_manager yield gen.maybe_future(cm.delete_checkpoint(checkpoint_id, path)) self.set_status(204) 
self.finish() class NotebooksRedirectHandler(IPythonHandler): """Redirect /api/notebooks to /api/contents""" SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH', 'POST', 'DELETE') def get(self, path): self.log.warn("/api/notebooks is deprecated, use /api/contents") self.redirect(url_path_join( self.base_url, 'api/contents', path )) put = patch = post = delete = get #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _checkpoint_id_regex = r"(?P<checkpoint_id>[\w-]+)" default_handlers = [ (r"/api/contents%s/checkpoints" % path_regex, CheckpointsHandler), (r"/api/contents%s/checkpoints/%s" % (path_regex, _checkpoint_id_regex), ModifyCheckpointsHandler), (r"/api/contents%s" % path_regex, ContentsHandler), (r"/api/notebooks/?(.*)", NotebooksRedirectHandler), ]
./CrossVul/dataset_final_sorted/CWE-20/py/good_1740_2
crossvul-python_data_good_50_6
# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2018-01-22 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            Add offline refill
# 2016-12-20 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            Add triggerchallenge endpoint
# 2016-10-23 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            Add subscription decorator
# 2016-09-05 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            SAML attributes on fail
# 2016-08-30 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            save client application type to database
# 2016-08-09 Cornelius Kölbel <cornelius@privacyidea.org>
#            Add possiblity to check OTP only
# 2015-11-19 Cornelius Kölbel <cornelius@privacyidea.org>
#            Add support for transaction_id to saml_check
# 2015-06-17 Cornelius Kölbel <cornelius@privacyidea.org>
#            Add policy decorator for API key requirement
# 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org>
#            Complete rewrite during flask migration
#            Try to provide REST API
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module contains the REST API for doing authentication.
The methods are tested in the file tests/test_api_validate.py

Authentication is either done by providing a username and a password or a
serial number and a password.

**Authentication workflow**

Authentication workflow is like this:

In case of authenticating a user:

 * :func:`privacyidea.lib.token.check_user_pass`
 * :func:`privacyidea.lib.token.check_token_list`
 * :func:`privacyidea.lib.tokenclass.TokenClass.authenticate`
 * :func:`privacyidea.lib.tokenclass.TokenClass.check_pin`
 * :func:`privacyidea.lib.tokenclass.TokenClass.check_otp`

In case if authenitcating a serial number:

 * :func:`privacyidea.lib.token.check_serial_pass`
 * :func:`privacyidea.lib.token.check_token_list`
 * :func:`privacyidea.lib.tokenclass.TokenClass.authenticate`
 * :func:`privacyidea.lib.tokenclass.TokenClass.check_pin`
 * :func:`privacyidea.lib.tokenclass.TokenClass.check_otp`

"""
from flask import (Blueprint, request, g, current_app)
from privacyidea.lib.user import get_user_from_param
from .lib.utils import send_result, getParam
from ..lib.decorators import (check_user_or_serial_in_request)
from .lib.utils import required
from privacyidea.lib.error import ParameterError
from privacyidea.lib.token import (check_user_pass, check_serial_pass,
                                   check_otp)
from privacyidea.api.lib.utils import get_all_params
from privacyidea.lib.config import (return_saml_attributes, get_from_config,
                                    return_saml_attributes_on_fail,
                                    SYSCONF)
from privacyidea.lib.audit import getAudit
from privacyidea.api.lib.prepolicy import (prepolicy, set_realm,
                                           api_key_required, mangle,
                                           save_client_application_type,
                                           check_base_action)
from privacyidea.api.lib.postpolicy import (postpolicy, check_tokentype,
                                            check_serial, check_tokeninfo,
                                            no_detail_on_fail,
                                            no_detail_on_success, autoassign,
                                            offline_info,
                                            add_user_detail_to_response,
                                            construct_radius_response)
from privacyidea.lib.policy import PolicyClass
from privacyidea.lib.config import ConfigClass
from privacyidea.lib.event import EventConfiguration
import logging
from privacyidea.api.lib.postpolicy import postrequest, sign_response
from privacyidea.api.auth import jwtauth
from privacyidea.api.register import register_blueprint
from privacyidea.api.recover import recover_blueprint
from privacyidea.lib.utils import get_client_ip
from privacyidea.lib.event import event
from privacyidea.lib.subscriptions import CheckSubscription
from privacyidea.api.auth import admin_required
from privacyidea.lib.policy import ACTION
from privacyidea.lib.token import get_tokens
from privacyidea.lib.machine import list_token_machines
from privacyidea.lib.applications.offline import MachineApplication
import json

log = logging.getLogger(__name__)

validate_blueprint = Blueprint('validate_blueprint', __name__)


@validate_blueprint.before_request
@register_blueprint.before_request
@recover_blueprint.before_request
def before_request():
    """
    This is executed before the request.

    Sets up per-request state on flask's ``g``/``request``: merged request
    parameters, the resolved user object, the policy and audit objects, the
    event configuration and the client IP.
    """
    g.config_object = ConfigClass()
    request.all_data = get_all_params(request.values, request.data)
    request.User = get_user_from_param(request.all_data)
    privacyidea_server = current_app.config.get("PI_AUDIT_SERVERNAME") or \
                         request.host
    # Create a policy_object, that reads the database audit settings
    # and contains the complete policy definition during the request.
    # This audit_object can be used in the postpolicy and prepolicy and it
    # can be passed to the innerpolicies.
    g.policy_object = PolicyClass()
    g.audit_object = getAudit(current_app.config)
    g.event_config = EventConfiguration()
    # access_route contains the ip addresses of all clients, hops and proxies.
    g.client_ip = get_client_ip(request,
                                get_from_config(SYSCONF.OVERRIDECLIENT))
    g.audit_object.log({"success": False,
                        "action_detail": "",
                        "client": g.client_ip,
                        "client_user_agent": request.user_agent.browser,
                        "privacyidea_server": privacyidea_server,
                        "action": "{0!s} {1!s}".format(request.method,
                                                       request.url_rule),
                        "info": ""})


@validate_blueprint.after_request
@register_blueprint.after_request
@recover_blueprint.after_request
@jwtauth.after_request
@postrequest(sign_response, request=request)
def after_request(response):
    """
    This function is called after a request.

    Finalizes the audit log entry (if one was created) and disables caching.

    :return: The response
    """
    # In certain error cases the before_request was not handled
    # completely so that we do not have an audit_object
    if "audit_object" in g:
        g.audit_object.finalize_log()

    # No caching!
    response.headers['Cache-Control'] = 'no-cache'
    return response


@validate_blueprint.route('/offlinerefill', methods=['POST'])
@check_user_or_serial_in_request(request)
@event("validate_offlinerefill", request, g)
def offlinerefill():
    """
    This endpoint allows to fetch new offline OTP values for a token,
    that is already offline.
    According to the definition it will send the missing OTP values, so that
    the client will have as much otp values as defined.

    :param serial: The serial number of the token, that should be refilled.
    :param refilltoken: The authorization token, that allows refilling.
    :param pass: the last password (maybe password+OTP) entered by the user
    :return:
    """
    result = False
    otps = {}
    serial = getParam(request.all_data, "serial", required)
    refilltoken = getParam(request.all_data, "refilltoken", required)
    password = getParam(request.all_data, "pass", required)
    tokenobj_list = get_tokens(serial=serial)
    if len(tokenobj_list) != 1:
        raise ParameterError("The token does not exist")
    else:
        tokenobj = tokenobj_list[0]
        machine_defs = list_token_machines(serial)
        # check if is still an offline token:
        for mdef in machine_defs:
            if mdef.get("application") == "offline":
                # check refill token:
                if tokenobj.get_tokeninfo("refilltoken") == refilltoken:
                    # refill
                    otps = MachineApplication.get_refill(tokenobj, password,
                                                         mdef.get("options"))
                    refilltoken = MachineApplication.generate_new_refilltoken(tokenobj)
                    response = send_result(True)
                    content = json.loads(response.data)
                    content["auth_items"] = {"offline": [{"refilltoken": refilltoken,
                                                          "response": otps}]}
                    response.data = json.dumps(content)
                    return response
    raise ParameterError("Token is not an offline token or refill token is "
                         "incorrect")


@validate_blueprint.route('/check', methods=['POST', 'GET'])
@validate_blueprint.route('/radiuscheck', methods=['POST', 'GET'])
@postpolicy(construct_radius_response, request=request)
@postpolicy(no_detail_on_fail, request=request)
@postpolicy(no_detail_on_success, request=request)
@postpolicy(add_user_detail_to_response, request=request)
@postpolicy(offline_info, request=request)
@postpolicy(check_tokeninfo, request=request)
@postpolicy(check_tokentype, request=request)
@postpolicy(check_serial, request=request)
@postpolicy(autoassign, request=request)
@prepolicy(set_realm, request=request)
@prepolicy(mangle, request=request)
@prepolicy(save_client_application_type, request=request)
@check_user_or_serial_in_request(request)
@CheckSubscription(request)
@prepolicy(api_key_required, request=request)
@event("validate_check", request, g)
def check():
    """
    check the authentication for a user or a serial number.
    Either a ``serial`` or a ``user`` is required to authenticate.
    The PIN and OTP value is sent in the parameter ``pass``.
    In case of successful authentication it returns ``result->value: true``.

    In case of a challenge response authentication a parameter ``exception=1``
    can be passed. This would result in a HTTP 500 Server Error response if
    an error occurred during sending of SMS or Email.

    In case ``/validate/radiuscheck`` is requested, the responses are
    modified as follows: A successful authentication returns an empty ``HTTP
    204`` response. An unsuccessful authentication returns an empty ``HTTP
    400`` response. Error responses are the same responses as for the
    ``/validate/check`` endpoint.

    A challenge-response flow returns ``result->value: false`` together with
    a ``transaction_id`` and one entry per triggered challenge in
    ``detail->multi_challenge``; all challenges of one request share the same
    transaction_id.

    :param serial: The serial number of the token, that tries to authenticate.
    :param user: The loginname/username of the user, who tries to authenticate.
    :param realm: The realm of the user, who tries to authenticate. If the
        realm is omitted, the user is looked up in the default realm.
    :param pass: The password, that consists of the OTP PIN and the OTP value.
    :param otponly: If set to 1, only the OTP value is verified. This is used
        in the management UI. Only used with the parameter serial.
    :param transaction_id: The transaction ID for a response to a challenge
        request
    :param state: The state ID for a response to a challenge request
    :return: a json result with a boolean "result": true
    """
    user = request.User
    serial = getParam(request.all_data, "serial")
    password = getParam(request.all_data, "pass", required)
    otp_only = getParam(request.all_data, "otponly")
    options = {"g": g, "clientip": g.client_ip}
    # Add all params to the options
    for key, value in request.all_data.items():
        if value and key not in ["g", "clientip"]:
            options[key] = value

    g.audit_object.log({"user": user.login,
                        "resolver": user.resolver,
                        "realm": user.realm})

    if serial:
        # serial given: authenticate the token directly
        if not otp_only:
            result, details = check_serial_pass(serial, password,
                                                options=options)
        else:
            result, details = check_otp(serial, password)
    else:
        result, details = check_user_pass(user, password, options=options)

    g.audit_object.log({"info": details.get("message"),
                        "success": result,
                        "serial": serial or details.get("serial"),
                        "tokentype": details.get("type")})
    return send_result(result, details=details)


@validate_blueprint.route('/samlcheck', methods=['POST', 'GET'])
@postpolicy(no_detail_on_fail, request=request)
@postpolicy(no_detail_on_success, request=request)
@postpolicy(add_user_detail_to_response, request=request)
@postpolicy(check_tokeninfo, request=request)
@postpolicy(check_tokentype, request=request)
@postpolicy(check_serial, request=request)
@postpolicy(autoassign, request=request)
@prepolicy(set_realm, request=request)
@prepolicy(mangle, request=request)
@prepolicy(save_client_application_type, request=request)
@check_user_or_serial_in_request(request)
@CheckSubscription(request)
@prepolicy(api_key_required, request=request)
@event("validate_check", request, g)
def samlcheck():
    """
    Authenticate the user and return the SAML user information.

    On success ``result->value`` contains ``auth: true`` plus an
    ``attributes`` dict (username, realm, resolver, email, surname,
    givenname, mobile, phone and any additional attributes from the
    resolver's attribute mapping). Depending on configuration, attributes
    may also be returned on failed authentication.

    :param user: The loginname/username of the user, who tries to
        authenticate.
    :param realm: The realm of the user, who tries to authenticate. If the
        realm is omitted, the user is looked up in the default realm.
    :param pass: The password, that consists of the OTP PIN and the OTP value.
    :return: a json result with a boolean "result": true
    """
    user = get_user_from_param(request.all_data)
    password = getParam(request.all_data, "pass", required)
    options = {"g": g, "clientip": g.client_ip}
    # Add all params to the options
    for key, value in request.all_data.items():
        if value and key not in ["g", "clientip"]:
            options[key] = value
    auth, details = check_user_pass(user, password, options=options)
    ui = user.info
    result_obj = {"auth": auth,
                  "attributes": {}}
    if return_saml_attributes():
        if auth or return_saml_attributes_on_fail():
            # privacyIDEA's own attribute map
            result_obj["attributes"] = {"username": ui.get("username"),
                                        "realm": user.realm,
                                        "resolver": user.resolver,
                                        "email": ui.get("email"),
                                        "surname": ui.get("surname"),
                                        "givenname": ui.get("givenname"),
                                        "mobile": ui.get("mobile"),
                                        "phone": ui.get("phone")
                                        }
            # additional attributes
            for k, v in ui.iteritems():
                result_obj["attributes"][k] = v

    g.audit_object.log({"info": details.get("message"),
                        "success": auth,
                        "serial": details.get("serial"),
                        "tokentype": details.get("type"),
                        "user": user.login,
                        "resolver": user.resolver,
                        "realm": user.realm})
    return send_result(result_obj, details=details)


@validate_blueprint.route('/triggerchallenge', methods=['POST', 'GET'])
@admin_required
@check_user_or_serial_in_request(request)
@prepolicy(check_base_action, request, action=ACTION.TRIGGERCHALLENGE)
@event("validate_triggerchallenge", request, g)
def trigger_challenge():
    """
    An administrator can call this endpoint if he has the right of
    ``triggerchallenge`` (scope: admin).
    He can pass a ``user`` name and or a ``serial`` number.
    privacyIDEA will trigger challenges for all native challenges response
    tokens, possessed by this user or only for the given serial number.

    The request needs to contain a valid PI-Authorization header.

    On success ``result->value`` is the number of tokens for which a
    challenge was triggered; the corresponding transaction ids and messages
    are returned in ``detail``. On error ``result->status`` is false.

    :param user: The loginname/username of the user, who tries to
        authenticate.
    :param realm: The realm of the user, who tries to authenticate. If the
        realm is omitted, the user is looked up in the default realm.
    :param serial: The serial number of the token.
    :return: a json result with a "result" of the number of matching
        challenge response tokens
    """
    user = request.User
    serial = getParam(request.all_data, "serial")
    result_obj = 0
    details = {"messages": [],
               "transaction_ids": []}
    options = {"g": g,
               "clientip": g.client_ip,
               "user": user}

    token_objs = get_tokens(serial=serial, user=user, active=True,
                            revoked=False, locked=False)
    for token_obj in token_objs:
        if "challenge" in token_obj.mode:
            # If this is a challenge response token, we create a challenge
            success, return_message, transactionid, attributes = \
                token_obj.create_challenge(options=options)
            if attributes:
                details["attributes"] = attributes
            if success:
                result_obj += 1
                details.get("transaction_ids").append(transactionid)
                # This will write only the serial of the token that was
                # processed last to the audit log
                g.audit_object.log({
                    "serial": token_obj.token.serial,
                })
            details.get("messages").append(return_message)

    g.audit_object.log({
        "user": user.login,
        "resolver": user.resolver,
        "realm": user.realm,
        "success": result_obj > 0,
        "info": "triggered {0!s} challenges".format(result_obj),
    })

    return send_result(result_obj, details=details)
./CrossVul/dataset_final_sorted/CWE-20/py/good_50_6
crossvul-python_data_good_1065_1
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org

"""Email address parsing code.

Lifted directly from rfc822.py.  This should eventually be rewritten.
"""

__all__ = [
    'mktime_tz',
    'parsedate',
    'parsedate_tz',
    'quote',
    ]

import time, calendar

SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '

# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']

_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z.  According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones.  RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.

_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }


def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.
    """
    res = _parsedate_tz(data)
    if not res:
        return
    if res[9] is None:
        # -0000 declaims timezone knowledge; normalize to 0 for callers.
        res[9] = 0
    return tuple(res)


def _parsedate_tz(data):
    """Convert date to extended time tuple.

    The last (additional) element is the time zone offset in seconds, except
    if the timezone was specified as -0000.  In that case the last element is
    None.  This indicates a UTC timestamp that explicitly declaims knowledge
    of the source timezone, as opposed to a +0000 timestamp that indicates the
    source timezone really was UTC.
    """
    if not data:
        return
    data = data.split()
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(',') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3:  # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i == -1:
            i = s.find('-')
        if i > 0:
            # Split a fused time+offset field like "12:34:56+0100".
            data[3:] = [s[:i], s[i:]]
        else:
            data.append('')  # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Tolerate swapped day/month order.
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Index hit a full month name; map back to 1..12.
        mm -= 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields were swapped.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    elif len(tm) == 1 and '.' in tm[0]:
        # Some non-compliant MUAs use '.' to separate time elements.
        tm = tm[0].split('.')
        if len(tm) == 2:
            [thh, tmm] = tm
            tss = 0
        elif len(tm) == 3:
            [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Check for a yy specified in two-digit format, then convert it to the
    # appropriate four-digit format, according to the POSIX standard. RFC 822
    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
    # mandates a 4-digit yy. For more information, see the documentation for
    # the time module.
    if yy < 100:
        # The year is between 1969 and 1999 (inclusive).
        if yy > 68:
            yy += 1900
        # The year is between 2000 and 2068 (inclusive).
        else:
            yy += 2000
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
        if tzoffset==0 and tz.startswith('-'):
            # -0000: explicitly unknown source timezone (see docstring).
            tzoffset = None
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]


def parsedate(data):
    """Convert a time string to a time tuple."""
    t = parsedate_tz(data)
    if isinstance(t, tuple):
        # Drop the timezone element; return a 9-tuple like time.struct_time.
        return t[:9]
    else:
        return t


def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    else:
        t = calendar.timegm(data)
        return t - data[9]


def quote(str):
    """Prepare string to be used in a quoted string.

    Turns backslash and double quote characters into quoted pairs.  These
    are the only characters that need to be quoted inside a quoted string.
    Does not add the surrounding double quotes.
    """
    return str.replace('\\', '\\\\').replace('"', '\\"')


class AddrlistClass:
    """Address parser class by Ben Escoto.

    To understand what this class does, it helps to have a copy of RFC 2822 in
    front of you.

    Note: this class interface is deprecated and may be removed in the future.
    Use email.utils.AddressList instead.
    """

    def __init__(self, field):
        """Initialize a new instance.

        `field' is an unparsed address header field, containing one or more
        addresses.
        """
        self.specials = '()<>@,:;.\"[]'
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.FWS = self.LWS + self.CR
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax.  RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        self.commentlist = []

    def gotonext(self):
        """Skip white space and extract comments."""
        wslist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                if self.field[self.pos] not in '\n\r':
                    wslist.append(self.field[self.pos])
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else:
                break
        return EMPTYSTRING.join(wslist)

    def getaddrlist(self):
        """Parse all addresses.

        Returns a list containing all of the addresses.
        """
        result = []
        while self.pos < len(self.field):
            ad = self.getaddress()
            if ad:
                result += ad
            else:
                # Unparseable remainder: record an empty pair and keep going.
                result.append(('', ''))
        return result

    def getaddress(self):
        """Parse the next address."""
        self.commentlist = []
        self.gotonext()

        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()

        self.gotonext()
        returnlist = []

        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]

        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(SPACE.join(self.commentlist), addrspec)]

        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []

            fieldlen = len(self.field)
            self.pos += 1
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()

        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()

            if self.commentlist:
                returnlist = [(SPACE.join(plist) + ' (' +
                               ' '.join(self.commentlist) + ')', routeaddr)]
            else:
                returnlist = [(SPACE.join(plist), routeaddr)]

        else:
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                self.pos += 1

        self.gotonext()
        # NOTE(review): the source chunk is truncated here, mid-statement —
        # the remainder of getaddress() (and the rest of AddrlistClass) lies
        # outside the visible region.
        if self.pos < len(self.field)
and self.field[self.pos] == ',': self.pos += 1 return returnlist def getrouteaddr(self): """Parse a route address (Return-path value). This method just skips all the route stuff and returns the addrspec. """ if self.field[self.pos] != '<': return expectroute = False self.pos += 1 self.gotonext() adlist = '' while self.pos < len(self.field): if expectroute: self.getdomain() expectroute = False elif self.field[self.pos] == '>': self.pos += 1 break elif self.field[self.pos] == '@': self.pos += 1 expectroute = True elif self.field[self.pos] == ':': self.pos += 1 else: adlist = self.getaddrspec() self.pos += 1 break self.gotonext() return adlist def getaddrspec(self): """Parse an RFC 2822 addr-spec.""" aslist = [] self.gotonext() while self.pos < len(self.field): preserve_ws = True if self.field[self.pos] == '.': if aslist and not aslist[-1].strip(): aslist.pop() aslist.append('.') self.pos += 1 preserve_ws = False elif self.field[self.pos] == '"': aslist.append('"%s"' % quote(self.getquote())) elif self.field[self.pos] in self.atomends: if aslist and not aslist[-1].strip(): aslist.pop() break else: aslist.append(self.getatom()) ws = self.gotonext() if preserve_ws and ws: aslist.append(ws) if self.pos >= len(self.field) or self.field[self.pos] != '@': return EMPTYSTRING.join(aslist) aslist.append('@') self.pos += 1 self.gotonext() domain = self.getdomain() if not domain: # Invalid domain, return an empty address instead of returning a # local part to denote failed parsing. 
return EMPTYSTRING return EMPTYSTRING.join(aslist) + domain def getdomain(self): """Get the complete domain name from an address.""" sdlist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] == '[': sdlist.append(self.getdomainliteral()) elif self.field[self.pos] == '.': self.pos += 1 sdlist.append('.') elif self.field[self.pos] == '@': # bpo-34155: Don't parse domains with two `@` like # `a@malicious.org@important.com`. return EMPTYSTRING elif self.field[self.pos] in self.atomends: break else: sdlist.append(self.getatom()) return EMPTYSTRING.join(sdlist) def getdelimited(self, beginchar, endchars, allowcomments=True): """Parse a header fragment delimited by special characters. `beginchar' is the start character for the fragment. If self is not looking at an instance of `beginchar' then getdelimited returns the empty string. `endchars' is a sequence of allowable end-delimiting characters. Parsing stops when one of these is encountered. If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed within the parsed fragment. 
""" if self.field[self.pos] != beginchar: return '' slist = [''] quote = False self.pos += 1 while self.pos < len(self.field): if quote: slist.append(self.field[self.pos]) quote = False elif self.field[self.pos] in endchars: self.pos += 1 break elif allowcomments and self.field[self.pos] == '(': slist.append(self.getcomment()) continue # have already advanced pos from getcomment elif self.field[self.pos] == '\\': quote = True else: slist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(slist) def getquote(self): """Get a quote-delimited fragment from self's field.""" return self.getdelimited('"', '"\r', False) def getcomment(self): """Get a parenthesis-delimited fragment from self's field.""" return self.getdelimited('(', ')\r', True) def getdomainliteral(self): """Parse an RFC 2822 domain-literal.""" return '[%s]' % self.getdelimited('[', ']\r', False) def getatom(self, atomends=None): """Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).""" atomlist = [''] if atomends is None: atomends = self.atomends while self.pos < len(self.field): if self.field[self.pos] in atomends: break else: atomlist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(atomlist) def getphraselist(self): """Parse a sequence of RFC 2822 phrases. A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. 
""" plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist class AddressList(AddrlistClass): """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" def __init__(self, field): AddrlistClass.__init__(self, field) if field: self.addresslist = self.getaddrlist() else: self.addresslist = [] def __len__(self): return len(self.addresslist) def __add__(self, other): # Set union newaddr = AddressList(None) newaddr.addresslist = self.addresslist[:] for x in other.addresslist: if not x in self.addresslist: newaddr.addresslist.append(x) return newaddr def __iadd__(self, other): # Set union, in-place for x in other.addresslist: if not x in self.addresslist: self.addresslist.append(x) return self def __sub__(self, other): # Set difference newaddr = AddressList(None) for x in self.addresslist: if not x in other.addresslist: newaddr.addresslist.append(x) return newaddr def __isub__(self, other): # Set difference, in-place for x in other.addresslist: if x in self.addresslist: self.addresslist.remove(x) return self def __getitem__(self, index): # Make indexing, slices, and 'in' work return self.addresslist[index]
./CrossVul/dataset_final_sorted/CWE-20/py/good_1065_1
crossvul-python_data_bad_2141_1
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import multiprocessing
import signal
import os
import pwd
import Queue
import random
import traceback
import tempfile
import time
import collections
import socket
import base64
import sys
import pipes
import jinja2
import subprocess
import getpass

import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible.utils import template
from ansible.utils import check_conditional
from ansible.utils import string_functions
from ansible import errors
from ansible import module_common
import poller
import connection

from return_data import ReturnData
from ansible.callbacks import DefaultRunnerCallbacks, vv
from ansible.module_common import ModuleReplacer

module_replacer = ModuleReplacer(strip_comments=False)

# pycrypto's PRNG must be re-seeded after fork(); guard the import since
# some platforms ship pycrypto without Crypto.Random.atfork.
HAS_ATFORK=True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK=False

# module-level handle set by the parent so forked workers can reach the Runner
multiprocessing_runner = None

# used to lock multiprocess inputs and outputs at various levels
OUTPUT_LOCKFILE = tempfile.TemporaryFile()
PROCESS_LOCKFILE = tempfile.TemporaryFile()

################################################

def _executor_hook(job_queue, result_queue, new_stdin):
    """Worker-process entry point: drain hosts from job_queue, execute the
    shared multiprocessing_runner against each, and push ReturnData objects
    onto result_queue."""

    # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # this function also not present in CentOS 6
    if HAS_ATFORK:
        atfork()

    # parent handles SIGINT; workers ignore it so Ctrl-C is processed once
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        try:
            host = job_queue.get(block=False)
            return_data = multiprocessing_runner._executor(host, new_stdin)
            result_queue.put(return_data)
        except Queue.Empty:
            pass
        except:
            # NOTE(review): bare except keeps a single bad host from killing
            # the worker; the traceback is printed rather than propagated.
            traceback.print_exc()

class HostVars(dict):
    ''' A special view of vars_cache that adds values from the inventory when needed. '''

    def __init__(self, vars_cache, inventory, vault_password=None):
        self.vars_cache = vars_cache
        self.inventory = inventory
        self.lookup = dict()    # per-host memoized merge of inventory + cache
        self.update(vars_cache)
        self.vault_password = vault_password

    def __getitem__(self, host):
        # Lazily merge inventory variables with cached vars; cache values win.
        if host not in self.lookup:
            result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
            result.update(self.vars_cache.get(host, {}))
            self.lookup[host] = result
        return self.lookup[host]

class Runner(object):
    ''' core API interface to ansible '''

    # see bin/ansible for how this is used...

    def __init__(self,
        host_list=C.DEFAULT_HOST_LIST,      # ex: /etc/ansible/hosts, legacy usage
        module_path=None,                   # ex: /usr/share/ansible
        module_name=C.DEFAULT_MODULE_NAME,  # ex: copy
        module_args=C.DEFAULT_MODULE_ARGS,  # ex: "src=/tmp/a dest=/tmp/b"
        forks=C.DEFAULT_FORKS,              # parallelism level
        timeout=C.DEFAULT_TIMEOUT,          # SSH timeout
        pattern=C.DEFAULT_PATTERN,          # which hosts?  ex: 'all', 'acme.example.org'
        remote_user=C.DEFAULT_REMOTE_USER,  # ex: 'username'
        remote_pass=C.DEFAULT_REMOTE_PASS,  # ex: 'password123' or None if using key
        remote_port=None,                   # if SSH on different ports
        private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
        sudo_pass=C.DEFAULT_SUDO_PASS,      # ex: 'password123' or None
        background=0,                       # async poll every X seconds, else 0 for non-async
        basedir=None,                       # directory of playbook, if applicable
        setup_cache=None,                   # used to share fact data w/ other tasks
        vars_cache=None,                    # used to store variables about hosts
        transport=C.DEFAULT_TRANSPORT,      # 'ssh', 'paramiko', 'local'
        conditional='True',                 # run only if this fact expression evals to true
        callbacks=None,                     # used for output
        sudo=False,                         # whether to run sudo or not
        sudo_user=C.DEFAULT_SUDO_USER,      # ex: 'root'
        module_vars=None,                   # a playbooks internals thing
        default_vars=None,                  # ditto
        is_playbook=False,                  # running from playbook or not?
        inventory=None,                     # reference to Inventory object
        subset=None,                        # subset pattern
        check=False,                        # don't make any changes, just try to probe for potential changes
        diff=False,                         # whether to show diffs for template files that change
        environment=None,                   # environment variables (as dict) to use inside the command
        complex_args=None,                  # structured data in addition to module_args, must be a dict
        error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
        accelerate=False,                   # use accelerated connection
        accelerate_ipv6=False,              # accelerated connection w/ IPv6
        accelerate_port=None,               # port to use with accelerated connection
        su=False,                           # Are we running our command via su?
        su_user=None,                       # User to su to when running command, ex: 'root'
        su_pass=C.DEFAULT_SU_PASS,
        vault_pass=None,
        run_hosts=None,                     # an optional list of pre-calculated hosts to run on
        no_log=False,                       # option to enable/disable logging for a given task
        ):

        # used to lock multiprocess inputs and outputs at various levels
        self.output_lockfile  = OUTPUT_LOCKFILE
        self.process_lockfile = PROCESS_LOCKFILE

        if not complex_args:
            complex_args = {}

        # storage & defaults
        self.check            = check
        self.diff             = diff
        self.setup_cache      = utils.default(setup_cache, lambda: collections.defaultdict(dict))
        self.vars_cache       = utils.default(vars_cache, lambda: collections.defaultdict(dict))
        self.basedir          = utils.default(basedir, lambda: os.getcwd())
        self.callbacks        = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
        self.generated_jid    = str(random.randint(0, 999999999999))
        self.transport        = transport
        self.inventory        = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))

        self.module_vars      = utils.default(module_vars, lambda: {})
        self.default_vars     = utils.default(default_vars, lambda: {})
        self.always_run       = None
        self.connector        = connection.Connector(self)
        self.conditional      = conditional
        self.module_name      = module_name
        self.forks            = int(forks)
        self.pattern          = pattern
        self.module_args      = module_args
        self.timeout          = timeout
        self.remote_user      = remote_user
        self.remote_pass      = remote_pass
        self.remote_port      = remote_port
        self.private_key_file = private_key_file
        self.background       = background
        self.sudo             = sudo
        self.sudo_user_var    = sudo_user
        self.sudo_user        = None
        self.sudo_pass        = sudo_pass
        self.is_playbook      = is_playbook
        self.environment      = environment
        self.complex_args     = complex_args
        self.error_on_undefined_vars = error_on_undefined_vars
        self.accelerate       = accelerate
        self.accelerate_port  = accelerate_port
        self.accelerate_ipv6  = accelerate_ipv6
        self.callbacks.runner = self
        self.su               = su
        self.su_user_var      = su_user
        self.su_user          = None
        self.su_pass          = su_pass
        self.vault_pass       = vault_pass
        self.no_log           = no_log

        if self.transport == 'smart':
            # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko
            # 'smart' is the default since 1.2.1/1.3
            cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (out, err) = cmd.communicate()
            if "Bad configuration option" in err:
                self.transport = "paramiko"
            else:
                self.transport = "ssh"

        # save the original transport, in case it gets
        # changed later via options like accelerate
        self.original_transport = self.transport

        # misc housekeeping
        if subset and self.inventory._subset is None:
            # don't override subset when passed from playbook
            self.inventory.subset(subset)

        # If we get a pre-built list of hosts to run on, from say a playbook, use them.
        # Also where we will store the hosts to run on once discovered
        self.run_hosts = run_hosts

        if self.transport == 'local':
            self.remote_user = pwd.getpwuid(os.geteuid())[0]

        if module_path is not None:
            for i in module_path.split(os.pathsep):
                utils.plugins.module_finder.add_directory(i)

        utils.plugins.push_basedir(self.basedir)

        # ensure we are using unique tmp paths
        random.seed()

    # *****************************************************

    def _complex_args_hack(self, complex_args, module_args):
        """
        ansible-playbook both allows specifying key=value string arguments and complex arguments
        however not all modules use our python common module system and cannot
        access these.  An example might be a Bash module.  This hack allows users to still pass "args"
        as a hash of simple scalars to those arguments and is short term.  We could technically
        just feed JSON to the module, but that makes it hard on Bash consumers.  The way this is implemented
        it does mean values in 'args' have LOWER priority than those on the key=value line, allowing
        args to provide yet another way to have pluggable defaults.
        """
        if complex_args is None:
            return module_args
        if not isinstance(complex_args, dict):
            raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
        for (k,v) in complex_args.iteritems():
            if isinstance(v, basestring):
                # pipes.quote prevents shell interpretation of scalar values
                module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
        return module_args

    # *****************************************************

    def _transfer_str(self, conn, tmp, name, data):
        ''' transfer string to remote file '''

        if type(data) == dict:
            data = utils.jsonify(data)

        afd, afile = tempfile.mkstemp()
        afo = os.fdopen(afd, 'w')
        try:
            if not isinstance(data, unicode):
                #ensure the data is valid UTF-8
                data.decode('utf-8')
            else:
                data = data.encode('utf-8')
            afo.write(data)
        except:
            raise errors.AnsibleError("failure encoding into utf-8")
        afo.flush()
        afo.close()

        remote = conn.shell.join_path(tmp, name)
        try:
            conn.put_file(afile, remote)
        finally:
            # always remove the local temp copy, even if the transfer failed
            os.unlink(afile)
        return remote

    # *****************************************************

    def _compute_environment_string(self, conn, inject=None):
        ''' what environment variables to use when running the command? '''

        enviro = {}
        if self.environment:
            # NOTE(review): the environment spec is templated and then passed
            # through utils.safe_eval; verify upstream that untrusted data
            # cannot reach this path (historically a hardening point in this
            # codebase).
            enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
            enviro = utils.safe_eval(enviro)
            if type(enviro) != dict:
                raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)

        return conn.shell.env_prefix(**enviro)

    # *****************************************************

    def _compute_delegate(self, host, password, remote_inject):
        """ Build a dictionary of all attributes for the delegate host """

        delegate = {}

        # allow delegated host to be templated
        delegate['host'] = template.template(self.basedir, host,
                                remote_inject, fail_on_undefined=True)

        delegate['inject'] = remote_inject.copy()

        # set any interpreters
        interpreters = []
        for i in delegate['inject']:
            if i.startswith("ansible_") and i.endswith("_interpreter"):
                interpreters.append(i)
        for i in interpreters:
            del delegate['inject'][i]
        port = C.DEFAULT_REMOTE_PORT

        this_host = delegate['host']

        # get the vars for the delegate by it's name
        try:
            this_info = delegate['inject']['hostvars'][this_host]
        except:
            # make sure the inject is empty for non-inventory hosts
            this_info = {}

        # get the real ssh_address for the delegate
        # and allow ansible_ssh_host to be templated
        delegate['ssh_host'] = template.template(self.basedir,
                            this_info.get('ansible_ssh_host', this_host),
                            this_info, fail_on_undefined=True)

        delegate['port'] = this_info.get('ansible_ssh_port', port)

        delegate['user'] = self._compute_delegate_user(this_host, delegate['inject'])

        delegate['pass'] = this_info.get('ansible_ssh_pass', password)
        delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file',
                                        self.private_key_file)
        delegate['transport'] = this_info.get('ansible_connection', self.transport)
        delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass)

        # Last chance to get private_key_file from global variables.
        # this is usefull if delegated host is not defined in the inventory
        if delegate['private_key_file'] is None:
            delegate['private_key_file'] = remote_inject.get(
                'ansible_ssh_private_key_file', None)

        if delegate['private_key_file'] is not None:
            delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])

        for i in this_info:
            if i.startswith("ansible_") and i.endswith("_interpreter"):
                delegate['inject'][i] = this_info[i]

        return delegate

    def _compute_delegate_user(self, host, inject):
        """ Caculate the remote user based on an order of preference """

        # inventory > playbook > original_host
        actual_user = inject.get('ansible_ssh_user', self.remote_user)
        thisuser = None

        if host in inject['hostvars']:
            if inject['hostvars'][host].get('ansible_ssh_user'):
                # user for delegate host in inventory
                thisuser = inject['hostvars'][host].get('ansible_ssh_user')

        if thisuser is None and self.remote_user:
            # user defined by play/runner
            thisuser = self.remote_user

        if thisuser is not None:
            actual_user = thisuser
        else:
            # fallback to the inventory user of the play host
            #actual_user = inject.get('ansible_ssh_user', actual_user)
            actual_user = inject.get('ansible_ssh_user', self.remote_user)

        return actual_user

    # *****************************************************

    def _execute_module(self, conn, tmp, module_name, args,
        async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):

        ''' transfer and run a module along with its arguments on the remote side'''

        # hack to support fireball mode
        if module_name == 'fireball':
            args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
            if 'port' not in args:
                args += " port=%s" % C.ZEROMQ_PORT

        (
        module_style,
        shebang,
        module_data
        ) = self._configure_module(conn, module_name, args, inject, complex_args)

        # a remote tmp path may be necessary and not already created
        if self._late_needs_tmp_path(conn, tmp, module_style):
            tmp = self._make_tmp_path(conn)

        remote_module_path = conn.shell.join_path(tmp, module_name)

        # pipelining skips the file transfer when the transport supports it
        if (module_style != 'new'
           or async_jid is not None
           or not conn.has_pipelining
           or not C.ANSIBLE_SSH_PIPELINING
           or C.DEFAULT_KEEP_REMOTE_FILES
           or self.su):
            self._transfer_str(conn, tmp, module_name, module_data)

        environment_string = self._compute_environment_string(conn, inject)

        if "tmp" in tmp and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')):
            # deal with possible umask issues once sudo'ed to other user
            self._remote_chmod(conn, 'a+r', remote_module_path, tmp)

        cmd = ""
        in_data = None
        if module_style != 'new':
            if 'CHECKMODE=True' in args:
                # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to
                # do --check mode, so to be safe we will not run it.
                return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
            elif 'NO_LOG' in args:
                return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))

            args = template.template(self.basedir, args, inject)

            # decide whether we need to transfer JSON or key=value
            argsfile = None
            if module_style == 'non_native_want_json':
                if complex_args:
                    complex_args.update(utils.parse_kv(args))
                    argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
                else:
                    argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))

            else:
                argsfile = self._transfer_str(conn, tmp, 'arguments', args)

            if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
                # deal with possible umask issues once sudo'ed to other user
                self._remote_chmod(conn, 'a+r', argsfile, tmp)

            if async_jid is None:
                cmd = "%s %s" % (remote_module_path, argsfile)
            else:
                cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
        else:
            if async_jid is None:
                if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.su:
                    in_data = module_data
                else:
                    cmd = "%s" % (remote_module_path)
            else:
                cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])

        if not shebang:
            raise errors.AnsibleError("module is missing interpreter line")

        rm_tmp = None
        if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        if self.su:
            res = self._low_level_exec_command(conn, cmd, tmp, su=True, in_data=in_data)
        else:
            res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data)

        if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = conn.shell.remove(tmp, recurse=True)
                self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)

        data = utils.parse_json(res['stdout'])
        if 'parsed' in data and data['parsed'] == False:
            data['msg'] += res['stderr']
        return ReturnData(conn=conn, result=data)

    # *****************************************************

    def _executor(self, host, new_stdin):
        ''' handler for multiprocessing library '''

        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        try:
            self._new_stdin = new_stdin
            if not new_stdin and fileno is not None:
                try:
                    self._new_stdin = os.fdopen(os.dup(fileno))
                except OSError, e:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass

            exec_rc = self._executor_internal(host, new_stdin)
            if type(exec_rc) != ReturnData:
                raise Exception("unexpected return type: %s" % type(exec_rc))
            # redundant, right?
            if not exec_rc.comm_ok:
                self.callbacks.on_unreachable(host, exec_rc.result)
            return exec_rc
        except errors.AnsibleError, ae:
            msg = str(ae)
            self.callbacks.on_unreachable(host, msg)
            return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
        except Exception:
            msg = traceback.format_exc()
            self.callbacks.on_unreachable(host, msg)
            return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))

    # *****************************************************

    def _executor_internal(self, host, new_stdin):
        ''' executes any module one or more times '''

        host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
        host_connection = host_variables.get('ansible_connection', self.transport)
        if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
            port = host_variables.get('ansible_ssh_port', self.remote_port)
            if port is None:
                port = C.DEFAULT_REMOTE_PORT
        else:
            # fireball, local, etc
            port = self.remote_port

        # merge the VARS and SETUP caches for this host
        combined_cache = self.setup_cache.copy()
        combined_cache.setdefault(host, {}).update(self.vars_cache.get(host, {}))
        hostvars = HostVars(combined_cache, self.inventory, vault_password=self.vault_pass)

        # use combined_cache and host_variables to template the module_vars
        # we update the inject variables with the data we're about to template
        # since some of the variables we'll be replacing may be contained there too
        module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
        module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
        module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)

        inject = {}
        inject = utils.combine_vars(inject, self.default_vars)
        inject = utils.combine_vars(inject, host_variables)
        inject = utils.combine_vars(inject, module_vars)
        inject = utils.combine_vars(inject, combined_cache.get(host, {}))
        inject.setdefault('ansible_ssh_user', self.remote_user)
        inject['hostvars'] = hostvars
        inject['group_names'] = host_variables.get('group_names', [])
        inject['groups'] = self.inventory.groups_list()
        inject['vars'] = self.module_vars
        inject['defaults'] = self.default_vars
        inject['environment'] = self.environment
        inject['playbook_dir'] = self.basedir

        if self.inventory.basedir() is not None:
            inject['inventory_dir'] = self.inventory.basedir()

        if self.inventory.src() is not None:
            inject['inventory_file'] = self.inventory.src()

        # allow with_foo to work in playbooks...
        items = None
        items_plugin = self.module_vars.get('items_lookup_plugin', None)

        if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:

            basedir = self.basedir
            if '_original_file' in inject:
                basedir = os.path.dirname(inject['_original_file'])
                filesdir = os.path.join(basedir, '..', 'files')
                if os.path.exists(filesdir):
                    basedir = filesdir

            items_terms = self.module_vars.get('items_lookup_terms', '')
            items_terms = template.template(basedir, items_terms, inject)
            items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
            if type(items) != list:
                raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)

            if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng' ]:
                # hack for apt, yum, and pkgng so that with_items maps back into a single module call
                use_these_items = []
                for x in items:
                    inject['item'] = x
                    if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                        use_these_items.append(x)
                inject['item'] = ",".join(use_these_items)
                items = None

        # logic to replace complex args if possible
        complex_args = self.complex_args

        # logic to decide how to run things depends on whether with_items is used
        if items is None:
            if isinstance(complex_args, basestring):
                # NOTE(review): templated complex_args are evaluated via
                # utils.safe_eval; confirm upstream sanitization if these can
                # carry untrusted remote data.
                complex_args = template.template(self.basedir, complex_args, inject, convert_bare=True)
                complex_args = utils.safe_eval(complex_args)
                if type(complex_args) != dict:
                    raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args)
            return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
        elif len(items) > 0:

            # executing using with_items, so make multiple calls
            # TODO: refactor

            if self.background > 0:
                raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")

            all_comm_ok = True
            all_changed = False
            all_failed = False
            results = []
            for x in items:
                # use a fresh inject for each item
                this_inject = inject.copy()
                this_inject['item'] = x

                # TODO: this idiom should be replaced with an up-conversion to a Jinja2 template evaluation
                if isinstance(self.complex_args, basestring):
                    complex_args = template.template(self.basedir, self.complex_args, this_inject, convert_bare=True)
                    complex_args = utils.safe_eval(complex_args)
                    if type(complex_args) != dict:
                        raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args)
                result = self._executor_internal_inner(
                     host,
                     self.module_name,
                     self.module_args,
                     this_inject,
                     port,
                     complex_args=complex_args
                )
                results.append(result.result)
                if result.comm_ok == False:
                    all_comm_ok = False
                    all_failed = True
                    break
            for x in results:
                if x.get('changed') == True:
                    all_changed = True
                if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
                    all_failed = True
                    break
            msg = 'All items completed'
            if all_failed:
                msg = "One or more items failed."
            rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
            if not all_failed:
                del rd_result['failed']
            return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
        else:
            self.callbacks.on_skipped(host, None)
            return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))

    # *****************************************************

    def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
        ''' decides how to invoke a module '''

        # late processing of parameterized sudo_user (with_items,..)
        if self.sudo_user_var is not None:
            self.sudo_user = template.template(self.basedir, self.sudo_user_var, inject)
        if self.su_user_var is not None:
            self.su_user = template.template(self.basedir, self.su_user_var, inject)

        # allow module args to work as a dictionary
        # though it is usually a string
        new_args = ""
        if type(module_args) == dict:
            for (k,v) in module_args.iteritems():
                new_args = new_args + "%s='%s' " % (k,v)
            module_args = new_args

        # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
        module_name = template.template(self.basedir, module_name, inject)

        if module_name in utils.plugins.action_loader:
            if self.background != 0:
                raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
            handler = utils.plugins.action_loader.get(module_name, self)
        elif self.background == 0:
            handler = utils.plugins.action_loader.get('normal', self)
        else:
            handler = utils.plugins.action_loader.get('async', self)

        if type(self.conditional) != list:
            self.conditional = [ self.conditional ]

        for cond in self.conditional:

            if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                result = utils.jsonify(dict(changed=False, skipped=True))
                self.callbacks.on_skipped(host, inject.get('item',None))
                return ReturnData(host=host, result=result)

        if getattr(handler, 'setup', None) is not None:
            handler.setup(module_name, inject)
        conn = None
        actual_host = inject.get('ansible_ssh_host', host)
        # allow ansible_ssh_host to be templated
        actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
        actual_port = port
        actual_user = inject.get('ansible_ssh_user', self.remote_user)
        actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
        actual_transport = inject.get('ansible_connection', self.transport)
        actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
        actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
        self.sudo = utils.boolean(inject.get('ansible_sudo', self.sudo))
        self.sudo_user = inject.get('ansible_sudo_user', self.sudo_user)
        self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass)
        self.su = inject.get('ansible_su', self.su)
        self.su_pass = inject.get('ansible_su_pass', self.su_pass)

        # select default root user in case self.sudo requested
        # but no user specified; happens e.g.
in host vars when # just ansible_sudo=True is specified if self.sudo and self.sudo_user is None: self.sudo_user = 'root' if actual_private_key_file is not None: actual_private_key_file = os.path.expanduser(actual_private_key_file) if self.accelerate and actual_transport != 'local': #Fix to get the inventory name of the host to accelerate plugin if inject.get('ansible_ssh_host', None): self.accelerate_inventory_host = host else: self.accelerate_inventory_host = None # if we're using accelerated mode, force the # transport to accelerate actual_transport = "accelerate" if not self.accelerate_port: self.accelerate_port = C.ACCELERATE_PORT actual_port = inject.get('ansible_ssh_port', port) # the delegated host may have different SSH port configured, etc # and we need to transfer those, and only those, variables delegate_to = inject.get('delegate_to', None) if delegate_to is not None: delegate = self._compute_delegate(delegate_to, actual_pass, inject) actual_transport = delegate['transport'] actual_host = delegate['ssh_host'] actual_port = delegate['port'] actual_user = delegate['user'] actual_pass = delegate['pass'] actual_private_key_file = delegate['private_key_file'] self.sudo_pass = delegate['sudo_pass'] inject = delegate['inject'] # user/pass may still contain variables at this stage actual_user = template.template(self.basedir, actual_user, inject) actual_pass = template.template(self.basedir, actual_pass, inject) self.sudo_pass = template.template(self.basedir, self.sudo_pass, inject) # make actual_user available as __magic__ ansible_ssh_user variable inject['ansible_ssh_user'] = actual_user try: if actual_transport == 'accelerate': # for accelerate, we stuff both ports into a single # variable so that we don't have to mangle other function # calls just to accomodate this one case actual_port = [actual_port, self.accelerate_port] elif actual_port is not None: actual_port = int(template.template(self.basedir, actual_port, inject)) except ValueError, e: result = 
dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port) return ReturnData(host=host, comm_ok=False, result=result) try: conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file) if delegate_to or host != actual_host: conn.delegate = host default_shell = getattr(conn, 'default_shell', '') shell_type = inject.get('ansible_shell_type') if not shell_type: if default_shell: shell_type = default_shell else: shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) shell_plugin = utils.plugins.shell_loader.get(shell_type) if shell_plugin is None: shell_plugin = utils.plugins.shell_loader.get('sh') conn.shell = shell_plugin except errors.AnsibleConnectionFailed, e: result = dict(failed=True, msg="FAILED: %s" % str(e)) return ReturnData(host=host, comm_ok=False, result=result) tmp = '' # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir if self._early_needs_tmp_path(module_name, handler): tmp = self._make_tmp_path(conn) # render module_args and complex_args templates try: module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars) complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars) except jinja2.exceptions.UndefinedError, e: raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e)) result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) # Code for do until feature until = self.module_vars.get('until', None) if until is not None and result.comm_ok: inject[self.module_vars.get('register')] = result.result cond = template.template(self.basedir, until, inject, expand_lists=False) if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): retries = self.module_vars.get('retries') delay = 
self.module_vars.get('delay') for x in range(1, int(retries) + 1): # template the delay, cast to float and sleep delay = template.template(self.basedir, delay, inject, expand_lists=False) delay = float(delay) time.sleep(delay) tmp = '' if self._early_needs_tmp_path(module_name, handler): tmp = self._make_tmp_path(conn) result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) result.result['attempts'] = x vv("Result from run %i is: %s" % (x, result.result)) inject[self.module_vars.get('register')] = result.result cond = template.template(self.basedir, until, inject, expand_lists=False) if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): break if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): result.result['failed'] = True result.result['msg'] = "Task failed as maximum retries was encountered" else: result.result['attempts'] = 0 conn.close() if not result.comm_ok: # connection or parsing errors... 
self.callbacks.on_unreachable(host, result.result) else: data = result.result # https://github.com/ansible/ansible/issues/4958 if hasattr(sys.stdout, "isatty"): if "stdout" in data and sys.stdout.isatty(): if not string_functions.isprintable(data['stdout']): data['stdout'] = '' if 'item' in inject: result.result['item'] = inject['item'] result.result['invocation'] = dict( module_args=module_args, module_name=module_name ) changed_when = self.module_vars.get('changed_when') failed_when = self.module_vars.get('failed_when') if (changed_when is not None or failed_when is not None) and self.background == 0: register = self.module_vars.get('register') if register is not None: if 'stdout' in data: data['stdout_lines'] = data['stdout'].splitlines() inject[register] = data # only run the final checks if the async_status has finished, # or if we're not running an async_status check at all if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status': if changed_when is not None and 'skipped' not in data: data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) if failed_when is not None and 'skipped' not in data: data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) if is_chained: # no callbacks return result if 'skipped' in data: self.callbacks.on_skipped(host, inject.get('item',None)) elif not result.is_successful(): ignore_errors = self.module_vars.get('ignore_errors', False) self.callbacks.on_failed(host, data, ignore_errors) else: if self.diff: self.callbacks.on_file_diff(conn.host, result.diff) self.callbacks.on_ok(host, data) return result def _early_needs_tmp_path(self, module_name, handler): ''' detect if a tmp path should be created before the handler is called ''' if module_name in utils.plugins.action_loader: return getattr(handler, 'TRANSFERS_FILES', False) # other 
def _late_needs_tmp_path(self, conn, tmp, module_style):
    """Decide whether a remote temp dir must be created just before execution.

    Returns False when a temp path already exists (its name contains
    "tmp"), True when pipelining cannot be used (or the module source /
    arguments still need on-disk storage), and False otherwise.
    """
    if "tmp" in tmp:
        # tmp has already been created earlier in the run
        return False
    if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su:
        # tmp is necessary to store the module source code, or the user
        # asked to keep remote files, or su blocks pipelining.
        # NOTE: this clause already covers the no-pipelining case; the
        # original contained a second, unreachable
        # `if not conn.has_pipelining: return True` which has been removed.
        return True
    if module_style != "new":
        # even when conn has pipelining, old style modules need tmp to store arguments
        return True
    return False
def _remote_md5(self, conn, tmp, path):
    """Take a remote md5sum without requiring python on the target.

    Returns the checksum string (per the shell helper's contract the
    helper yields "1" when the file does not exist), or "INVALIDMD5SUM"
    when the command output cannot be parsed (e.g. the connection
    failed), so callers can keep going.
    """
    cmd = conn.shell.md5(path)
    data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True)
    data2 = utils.last_non_blank_line(data['stdout'])
    try:
        if data2 == '':
            # this may happen if the connection to the remote server
            # failed, so just return "INVALIDMD5SUM" to avoid errors
            return "INVALIDMD5SUM"
        else:
            return data2.split()[0]
    except IndexError:
        sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n")
        # bug fix: the original referenced the undefined name `md5s` here,
        # which raised a NameError instead of printing the diagnostic
        sys.stderr.write("command: %s\n" % cmd)
        sys.stderr.write("----\n")
        sys.stderr.write("output: %s\n" % data)
        sys.stderr.write("----\n")
        # this will signal that it changed and allow things to keep going
        return "INVALIDMD5SUM"
def _remove_tmp_path(self, conn, tmp_path):
    """Remove a remote temporary directory created earlier in the run.

    Only paths that look like our own tmp dirs (containing "-tmp-") are
    ever deleted; anything else is left alone as a safety measure.
    If we have gotten here we have a working ssh configuration, so a
    failure here could at worst leave tmp directories on the remote system.
    """
    if "-tmp-" not in tmp_path:
        return
    rm_cmd = conn.shell.remove(tmp_path, recurse=True)
    self._low_level_exec_command(conn, rm_cmd, None, sudoable=False)
def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
    """Transfer a configured module to the remote temp dir; does not run it.

    Returns a tuple of (remote_path, module_style, module_shebang).
    """
    style, shebang, payload = self._configure_module(
        conn, module_name, module_args, inject, complex_args)
    remote_path = conn.shell.join_path(tmp, module_name)
    self._transfer_str(conn, tmp, module_name, payload)
    return (remote_path, style, shebang)
def _partition_results(self, results):
    """Split raw executor results into 'contacted' and 'dark' host maps.

    Returns None when *results* is None. Hosts from self.run_hosts that
    produced no result at all are recorded as dark with an empty dict.
    Raises Exception if any result carries no host (internal error).
    """
    if results is None:
        return None

    partitioned = {"contacted": {}, "dark": {}}
    for res in results:
        if res.host is None:
            raise Exception("internal error, host not set")
        bucket = "contacted" if res.communicated_ok() else "dark"
        partitioned[bucket][res.host] = res.result

    # any host we targeted but never heard back from counts as dark
    for name in self.run_hosts:
        if name not in partitioned["dark"] and name not in partitioned["contacted"]:
            partitioned["dark"][name] = {}

    return partitioned
result_data = self._executor(hosts[0], None).result # Create a ResultData item for each host in this group # using the returned result. If we didn't do this we would # get false reports of dark hosts. results = [ ReturnData(host=h, result=result_data, comm_ok=True) \ for h in hosts ] del self.host_set elif self.forks > 1: try: results = self._parallel_exec(hosts) except IOError, ie: print ie.errno if ie.errno == 32: # broken pipe from Ctrl+C raise errors.AnsibleError("interrupted") raise else: results = [ self._executor(h, None) for h in hosts ] return self._partition_results(results) # ***************************************************** def run_async(self, time_limit): ''' Run this module asynchronously and return a poller. ''' self.background = time_limit results = self.run() return results, poller.AsyncPoller(results, self) # ***************************************************** def noop_on_check(self, inject): ''' Should the runner run in check mode or not ? ''' # initialize self.always_run on first call if self.always_run is None: self.always_run = self.module_vars.get('always_run', False) self.always_run = check_conditional( self.always_run, self.basedir, inject, fail_on_undefined=True) return (self.check and not self.always_run)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2141_1
crossvul-python_data_bad_50_5
def no_op_wraps(func):
    """Stand-in for functools.wraps that disables wrapping entirely.

    Returns a fake decorator which ignores the wrapper it is applied to
    and hands back *func* unchanged, so that Sphinx documents the
    decorated function's original signature.
    """
    def passthrough(_decorator):
        return func
    return passthrough
#needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinxcontrib.autohttp.flask'] http_index_ignore_prefixes = ['/token'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'privacyIDEA' copyright = u'2014-2017, Cornelius Kölbel' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for # a list of builtin themes. #html_theme = 'sphinxdoc' #html_theme = 'sphinx_rtd_theme' #html_theme = 'agogo' html_theme = 'flask' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes/flask-sphinx-themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "images/privacyidea-color.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. 
#html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'privacyIDEAdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System', u'Cornelius Kölbel', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. 
# List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'privacyidea-server', u'privacyIDEA Authentication System',
     [u'Cornelius Kölbel'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    # typo fix: "AUthentication" -> "Authentication"
    ('index', 'privacyIDEA', u'privacyIDEA Authentication System',
     u'Cornelius Kölbel', 'privacyIDEA', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
./CrossVul/dataset_final_sorted/CWE-20/py/bad_50_5
crossvul-python_data_bad_1656_0
# Copyright (C) 2013, Red Hat, Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import io
import logging
import select
import socket
import struct
import sys
import time

try:
    # Python 3.x
    import http.client as httplib
    import urllib.parse as urlparse
except ImportError:
    # Python 2.x
    import httplib
    import urlparse

import kdcproxy.codec as codec
from kdcproxy.config import MetaResolver


class HTTPException(Exception):
    """An HTTP response carried as an exception.

    The WSGI entry point below raises this for every outcome (including
    success, code 200) and converts it into a response in one place.
    The message body is normalized to bytes and a correct Content-Length
    header is always (re)computed.
    """

    def __init__(self, code, msg, headers=None):
        # NOTE: the default used to be a mutable `[]`; `None` avoids the
        # shared-default pitfall and is backward compatible.
        headers = list(filter(lambda h: h[0] != 'Content-Length',
                              headers or []))
        if 'Content-Type' not in dict(headers):
            headers.append(('Content-Type', 'text/plain; charset=utf-8'))
        # On Python 3 a str body must be encoded before len()/transmission.
        if sys.version_info.major == 3 and isinstance(msg, str):
            msg = bytes(msg, "utf-8")
        headers.append(('Content-Length', str(len(msg))))
        super(HTTPException, self).__init__(code, msg, headers)
        self.code = code
        self.message = msg
        self.headers = headers

    def __str__(self):
        # e.g. "405 Method Not Allowed" -- the WSGI status line.
        return "%d %s" % (self.code, httplib.responses[self.code])


class Application:
    """WSGI application proxying MS-KKDCP requests to KDC servers."""

    # Maximum accepted request body. An unbounded read of an
    # attacker-supplied Content-Length allowed a memory-exhaustion DoS
    # (CVE-2015-5159); Kerberos messages are small, 128 KiB is plenty.
    MAX_LENGTH = 128 * 1024

    SOCKTYPES = {
        "tcp": socket.SOCK_STREAM,
        "udp": socket.SOCK_DGRAM,
    }

    def __init__(self):
        self.__resolver = MetaResolver()

    def __await_reply(self, pr, rsocks, wsocks, timeout):
        """select()-loop over the candidate sockets until one replies.

        Writable sockets get the request sent; readable sockets are
        drained via __handle_recv(). Returns the first complete reply
        (with 4-byte length prefix) or None on timeout.
        """
        extra = 0
        read_buffers = {}
        while (timeout + extra) > time.time():
            if not wsocks and not rsocks:
                break

            r, w, x = select.select(rsocks, wsocks, rsocks + wsocks,
                                    (timeout + extra) - time.time())

            # Sockets in the exceptional set are dead; drop them.
            for sock in x:
                sock.close()
                try:
                    rsocks.remove(sock)
                except ValueError:
                    pass
                try:
                    wsocks.remove(sock)
                except ValueError:
                    pass

            for sock in w:
                try:
                    if self.sock_type(sock) == socket.SOCK_DGRAM:
                        # If we proxy over UDP, remove the 4-byte length
                        # prefix since it is TCP only.
                        sock.sendall(pr.request[4:])
                    else:
                        sock.sendall(pr.request)
                        extra = 10  # New connections get 10 extra seconds
                except Exception:
                    # BUGFIX: message used to say "recv()" on the send path.
                    logging.exception('Error in send() of %s', sock)
                    continue
                # Request sent; now wait for the answer on this socket.
                rsocks.append(sock)
                wsocks.remove(sock)

            for sock in r:
                try:
                    reply = self.__handle_recv(sock, read_buffers)
                except Exception:
                    logging.exception('Error in recv() of %s', sock)
                    if self.sock_type(sock) == socket.SOCK_STREAM:
                        # Remove broken TCP socket from readers
                        rsocks.remove(sock)
                else:
                    if reply is not None:
                        return reply

        return None

    def __handle_recv(self, sock, read_buffers):
        """Read from sock; return a complete length-prefixed reply or None."""
        if self.sock_type(sock) == socket.SOCK_DGRAM:
            # For UDP sockets, recv() returns an entire datagram
            # package. KDC sends one datagram as reply.
            reply = sock.recv(1048576)
            # If we proxy over UDP, we will be missing the 4-byte
            # length prefix. So add it.
            reply = struct.pack("!I", len(reply)) + reply
            return reply
        else:
            # TCP is a different story. The reply must be buffered
            # until the full answer is accumulated.
            buf = read_buffers.get(sock)
            part = sock.recv(1048576)
            if buf is None:
                if len(part) > 4:
                    # got enough data in the initial package. Now check
                    # if we got the full package in the first run.
                    (length, ) = struct.unpack("!I", part[0:4])
                    if length + 4 == len(part):
                        return part
                read_buffers[sock] = buf = io.BytesIO()

            if part:
                # data received, accumulate it in a buffer
                buf.write(part)
                return None
            else:
                # EOF received
                read_buffers.pop(sock)
                reply = buf.getvalue()
                return reply

    def __filter_addr(self, addr):
        """Keep only IPv4/IPv6 TCP/UDP getaddrinfo() results."""
        if addr[0] not in (socket.AF_INET, socket.AF_INET6):
            return False

        if addr[1] not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
            return False

        if addr[2] not in (socket.IPPROTO_TCP, socket.IPPROTO_UDP):
            return False

        return True

    def sock_type(self, sock):
        # Python 3 may OR SOCK_NONBLOCK into sock.type; mask it out so
        # comparisons against SOCK_STREAM/SOCK_DGRAM work everywhere.
        try:
            return sock.type & ~socket.SOCK_NONBLOCK
        except AttributeError:
            return sock.type

    def __call__(self, env, start_response):
        """WSGI entry point: decode the POSTed request, proxy it, reply."""
        try:
            # Validate the method
            method = env["REQUEST_METHOD"].upper()
            if method != "POST":
                raise HTTPException(405, "Method not allowed (%s)."
                                         % method)

            # Parse the request.
            # BUGFIX: this used to catch AttributeError, which int() of a
            # dict item can never raise -- a missing Content-Length header
            # escaped as an unhandled KeyError. Missing/invalid lengths are
            # now rejected with 411, and oversized bodies with 413 so a
            # client cannot make us buffer arbitrary amounts of data.
            try:
                length = int(env["CONTENT_LENGTH"])
            except (KeyError, ValueError):
                raise HTTPException(411, "Length required.")
            if length < 0:
                raise HTTPException(411, "Length required.")
            if length > self.MAX_LENGTH:
                raise HTTPException(413, "Request entity too large.")
            try:
                pr = codec.decode(env["wsgi.input"].read(length))
            except codec.ParsingError as e:
                raise HTTPException(400, e.message)

            # Find the remote proxy
            servers = self.__resolver.lookup(
                pr.realm,
                kpasswd=isinstance(pr, codec.KPASSWDProxyRequest)
            )
            if not servers:
                raise HTTPException(503, "Can't find remote (%s)." % pr)

            # Contact the remote server
            reply = None
            wsocks = []
            rsocks = []
            for server in map(urlparse.urlparse, servers):
                # Enforce valid, supported URIs
                scheme = server.scheme.lower().split("+", 1)
                if scheme[0] not in ("kerberos", "kpasswd"):
                    continue
                if len(scheme) > 1 and scheme[1] not in ("tcp", "udp"):
                    continue

                # Do the DNS lookup
                try:
                    port = server.port
                    if port is None:
                        port = scheme[0]
                    addrs = socket.getaddrinfo(server.hostname, port)
                except socket.gaierror:
                    continue

                # Sort addresses so that we get TCP first.
                #
                # Stick a None address on the end so we can get one
                # more attempt after all servers have been contacted.
                addrs = tuple(sorted(filter(self.__filter_addr, addrs)))
                for addr in addrs + (None,):
                    if addr is not None:
                        # Bypass unspecified socktypes
                        if (len(scheme) > 1
                                and addr[1] != self.SOCKTYPES[scheme[1]]):
                            continue

                        # Create the socket
                        sock = socket.socket(*addr[:3])
                        sock.setblocking(0)

                        # Connect
                        try:
                            # In Python 2.x, non-blocking connect() throws
                            # socket.error() with errno == EINPROGRESS. In
                            # Python 3.x, it throws io.BlockingIOError().
                            sock.connect(addr[4])
                        except socket.error as e:
                            if e.errno != 115:  # errno != EINPROGRESS
                                sock.close()
                                continue
                        except io.BlockingIOError:
                            pass
                        wsocks.append(sock)

                    # Resend packets to UDP servers
                    for sock in tuple(rsocks):
                        if self.sock_type(sock) == socket.SOCK_DGRAM:
                            wsocks.append(sock)
                            rsocks.remove(sock)

                    # Call select()
                    timeout = time.time() + (15 if addr is None else 2)
                    reply = self.__await_reply(pr, rsocks, wsocks, timeout)
                    if reply is not None:
                        break

                if reply is not None:
                    break

            for sock in rsocks + wsocks:
                sock.close()

            if reply is None:
                raise HTTPException(503, "Remote unavailable (%s)." % pr)

            # Return the result to the client
            raise HTTPException(200, codec.encode(reply),
                                [("Content-Type", "application/kerberos")])
        except HTTPException as e:
            start_response(str(e), e.headers)
            return [e.message]


application = Application()
./CrossVul/dataset_final_sorted/CWE-20/py/bad_1656_0
crossvul-python_data_bad_3767_2
try:
    from urllib.parse import urlparse, urlunparse
except ImportError:     # Python 2
    from urlparse import urlparse, urlunparse

from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, QueryDict
from django.template.response import TemplateResponse
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from django.shortcuts import resolve_url
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect

# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site


def _is_safe_url(url, host=None):
    """
    Return True if the url is a safe redirection target, i.e. one that
    does not point to a different host and uses a safe scheme.

    SECURITY: the previous inline check compared only the netloc, which
    lets crafted URLs such as "http:///example.com" (empty netloc) or
    "javascript:..." (non-HTTP scheme) through and enables open-redirect
    attacks. Always returns False on an empty url so callers fall back
    to their default.
    """
    if not url:
        return False
    url_info = urlparse(url)
    return ((not url_info.netloc or url_info.netloc == host) and
            (not url_info.scheme or url_info.scheme in ('http', 'https')))


@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm,
          current_app=None, extra_context=None):
    """
    Displays the login form and handles the login action.
    """
    redirect_to = request.REQUEST.get(redirect_field_name, '')

    if request.method == "POST":
        form = authentication_form(data=request.POST)
        if form.is_valid():
            # Ensure the user-originating redirection url is safe; fall
            # back to LOGIN_REDIRECT_URL when it is missing or points at
            # another host / unsafe scheme.
            if not _is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)

            # Okay, security checks complete. Log the user in.
            auth_login(request, form.get_user())

            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()

            return HttpResponseRedirect(redirect_to)
    else:
        form = authentication_form(request)

    request.session.set_test_cookie()

    current_site = get_current_site(request)

    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)


def logout(request, next_page=None,
           template_name='registration/logged_out.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           current_app=None, extra_context=None):
    """
    Logs out the user and displays 'You are logged out' message.
    """
    auth_logout(request)

    redirect_to = request.REQUEST.get(redirect_field_name, '')
    if redirect_to:
        # Security check -- don't follow redirects to a different host
        # or through an unsafe scheme.
        if _is_safe_url(url=redirect_to, host=request.get_host()):
            return HttpResponseRedirect(redirect_to)

    if next_page is None:
        current_site = get_current_site(request)
        context = {
            'site': current_site,
            'site_name': current_site.name,
            'title': _('Logged out')
        }
        if extra_context is not None:
            context.update(extra_context)
        return TemplateResponse(request, template_name, context,
                                current_app=current_app)
    else:
        # Redirect to this page until the session has been cleared.
        return HttpResponseRedirect(next_page or request.path)


def logout_then_login(request, login_url=None, current_app=None,
                      extra_context=None):
    """
    Logs out the user if he is logged in. Then redirects to the log-in page.
    """
    if not login_url:
        login_url = settings.LOGIN_URL
    login_url = resolve_url(login_url)
    return logout(request, login_url, current_app=current_app,
                  extra_context=extra_context)


def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    resolved_url = resolve_url(login_url or settings.LOGIN_URL)

    login_url_parts = list(urlparse(resolved_url))
    if redirect_field_name:
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

    return HttpResponseRedirect(urlunparse(login_url_parts))


# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
#   prompts for a new password
# - password_reset_complete shows a success message for the above

@csrf_protect
def password_reset(request, is_admin_site=False,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   subject_template_name='registration/password_reset_subject.txt',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   current_app=None,
                   extra_context=None):
    """Displays the password-reset form and sends the reset email."""
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            opts = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
                'from_email': from_email,
                'email_template_name': email_template_name,
                'subject_template_name': subject_template_name,
                'request': request,
            }
            if is_admin_site:
                opts = dict(opts, domain_override=request.META['HTTP_HOST'])
            form.save(**opts)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = password_reset_form()
    context = {
        'form': form,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)


def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        current_app=None, extra_context=None):
    """Shows a success message after the reset email has been sent."""
    context = {}
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)


# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def password_reset_confirm(request, uidb36=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           current_app=None, extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.
    """
    UserModel = get_user_model()
    assert uidb36 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
    try:
        uid_int = base36_to_int(uidb36)
        user = UserModel.objects.get(id=uid_int)
    except (ValueError, OverflowError, UserModel.DoesNotExist):
        user = None

    if user is not None and token_generator.check_token(user, token):
        validlink = True
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(None)
    else:
        validlink = False
        form = None
    context = {
        'form': form,
        'validlink': validlink,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)


def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            current_app=None, extra_context=None):
    """Shows a success message after the password has been reset."""
    context = {
        'login_url': resolve_url(settings.LOGIN_URL)
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)


@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    current_app=None, extra_context=None):
    """Displays and handles the password-change form for a logged-in user."""
    if post_change_redirect is None:
        post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
    if request.method == "POST":
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(post_change_redirect)
    else:
        form = password_change_form(user=request.user)
    context = {
        'form': form,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)


@login_required
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         current_app=None, extra_context=None):
    """Shows a success message after the password has been changed."""
    context = {}
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
./CrossVul/dataset_final_sorted/CWE-20/py/bad_3767_2
crossvul-python_data_good_3660_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova.openstack.common import excutils from nova.openstack.common import importutils from nova import quota from nova import utils from nova import volume FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | 
shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.REBUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETE: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.SHUTOFF: inst_state.SHUTOFF, vm_states.MIGRATING: inst_state.MIGRATE, vm_states.RESIZING: inst_state.RESIZE, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, } def _state_description(vm_state, shutdown_terminate): """Map the vm state to the server status string""" if (vm_state == vm_states.SHUTOFF and not shutdown_terminate): name = inst_state.STOPPED else: name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) return {'code': inst_state.name_to_code(name), 'name': name} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDevicedMapping.<N>.DeviceName BlockDevicedMapping.<N>.Ebs.SnapshotId BlockDevicedMapping.<N>.Ebs.VolumeSize BlockDevicedMapping.<N>.Ebs.DeleteOnTermination BlockDevicedMapping.<N>.Ebs.NoDevice BlockDevicedMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_vol_id_to_uuid(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = 
bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. 
""" def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.keypair_api = compute.api.KeypairAPI() self.sgh = importutils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: 
rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) 
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = self.keypair_api.get_key_pairs(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) try: keypair = self.keypair_api.create_key_pair(context, context.user_id, key_name) except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise exception.EC2APIError(msg) except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.EC2APIError(msg) except exception.KeyPairExists: msg = _("Key pair '%s' already exists.") % key_name raise exception.KeyPairExists(msg) return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint'], 'keyMaterial': keypair['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) public_key = base64.b64decode(public_key_material) try: keypair = self.keypair_api.import_key_pair(context, context.user_id, key_name, public_key) except 
exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise exception.EC2APIError(msg) except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.EC2APIError(msg) except exception.KeyPairExists: msg = _("Key pair '%s' already exists.") % key_name raise exception.EC2APIError(msg) return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint']} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: self.keypair_api.delete_key_pair(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return {'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, 
max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, 
source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.EC2APIError(_("Invalid CIDR")) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if source_security_group_name: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise 
exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol.lower() values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. """ for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a 
valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) rule_ids.append(rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if 
self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) count = QUOTAS.count(context, 'security_group_rules', security_group['id']) try: QUOTAS.limit_check(context, security_group_rules=count + 1) except exception.OverQuota: msg = _("Quota exceeded, too many security group rules.") raise exception.EC2APIError(msg) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if isinstance(group_name, unicode): group_name = group_name.encode('utf-8') # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. if FLAGS.ec2_strict_validation: # EC2 specification gives constraints for name and description: # Accepts alphanumeric characters, spaces, dashes, and underscores err = _("Value (%(value)s) for parameter %(param)s is invalid." 
" Content limited to Alphanumeric characters," " spaces, dashes, and underscores.") if not re.match('^[a-zA-Z0-9_\- ]+$', group_name): raise exception.InvalidParameterValue( err=err % {"value": group_name, "param": "GroupName"}) if not re.match('^[a-zA-Z0-9_\- ]+$', group_description): raise exception.InvalidParameterValue( err=err % {"value": group_description, "param": "GroupDescription"}) else: # Amazon accepts more symbols. # So, allow POSIX [:print:] characters. if not re.match(r'^[\x20-\x7E]+$', group_name): err = _("Value (%(value)s) for parameter %(param)s is invalid." " Content is limited to characters" " from the [:print:] class.") raise exception.InvalidParameterValue( err=err % {"value": group_name, "param": "GroupName"}) if len(group_name) > 255: err = _("Value (%s) for parameter GroupName is invalid." " Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) try: reservations = QUOTAS.reserve(context, security_groups=1) except exception.OverQuota: msg = _("Quota exceeded, too many security groups.") raise exception.EC2APIError(msg) try: group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) # Commit the reservation QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not 
enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") # Get reservations try: reservations = QUOTAS.reserve(context, security_groups=-1) except Exception: reservations = None LOG.exception(_("Failed to update usages deallocating " "security group")) LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) # Commit the reservations if reservations: QUOTAS.commit(context, reservations) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': 
def _format_volume(self, context, volume):
    """Convert a volume record into an EC2 DescribeVolumes element.

    Admin callers get an extended status string carrying project,
    host, attached-instance and mountpoint details.
    """
    instance_ec2_id = None
    instance_data = None
    if volume.get('instance_uuid', None):
        instance_uuid = volume['instance_uuid']
        instance = db.instance_get_by_uuid(context.elevated(),
                                           instance_uuid)
        instance_id = instance['id']
        instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
        instance_data = '%s[%s]' % (instance_ec2_id,
                                    instance['host'])
    v = {}
    v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
    v['status'] = volume['status']
    v['size'] = volume['size']
    v['availabilityZone'] = volume['availability_zone']
    v['createTime'] = volume['created_at']
    if context.is_admin:
        # Admins see the raw status annotated with ownership/placement.
        v['status'] = '%s (%s, %s, %s, %s)' % (
            volume['status'],
            volume['project_id'],
            volume['host'],
            instance_data,
            volume['mountpoint'])
    if volume['attach_status'] == 'attached':
        v['attachmentSet'] = [{'attachTime': volume['attach_time'],
                               'deleteOnTermination': False,
                               'device': volume['mountpoint'],
                               'instanceId': instance_ec2_id,
                               'status': 'attached',
                               'volumeId': v['volumeId']}]
    else:
        # EC2 expects a (possibly empty) attachment set to be present.
        v['attachmentSet'] = [{}]
    if volume.get('snapshot_id') is not None:
        v['snapshotId'] = ec2utils.id_to_ec2_snap_id(
            volume['snapshot_id'])
    else:
        v['snapshotId'] = None
    return v
def delete_volume(self, context, volume_id, **kwargs):
    """EC2 DeleteVolume: delete the volume named by an EC2 volume id."""
    validate_ec2_id(volume_id)
    internal_uuid = ec2utils.ec2_vol_id_to_uuid(volume_id)
    try:
        target = self.volume_api.get(context, internal_uuid)
        self.volume_api.delete(context, target)
    except exception.InvalidVolume:
        raise exception.EC2APIError(_('Delete Failed'))
    return True
kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance['uuid'], tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': 
def describe_instances(self, context, **kwargs):
    """EC2 DescribeInstances; 'instance_id' optionally filters."""
    return self._format_describe_instances(
        context, instance_id=kwargs.get('instance_id', None))

def describe_instances_v6(self, context, **kwargs):
    """IPv6 variant of DescribeInstances."""
    return self._format_describe_instances(
        context, instance_id=kwargs.get('instance_id', None),
        use_v6=True)

def _format_describe_instances(self, context, **kwargs):
    """Wrap the formatted reservations in a reservationSet mapping."""
    return {'reservationSet': self._format_instances(context, **kwargs)}

def _format_run_instances(self, context, reservation_id):
    """Format the single reservation produced by RunInstances."""
    formatted = self._format_instances(context,
                                       reservation_id=reservation_id)
    assert len(formatted) == 1
    return formatted[0]
_state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_uuid, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_uuid): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations 
= {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, 
def describe_addresses(self, context, public_ip=None, **kwargs):
    """EC2 DescribeAddresses: list floating IPs, optionally filtered."""
    if public_ip:
        floatings = [self.network_api.get_floating_ip_by_address(context,
                                                                 addr)
                     for addr in public_ip]
    else:
        floatings = self.network_api.get_floating_ips_by_project(context)
    return {'addressesSet': [self._format_address(context, ip)
                             for ip in floatings]}

def _format_address(self, context, floating_ip):
    """Convert a floating-ip record to an EC2 address element."""
    ec2_instance_id = None
    if floating_ip['fixed_ip_id']:
        fixed = self.network_api.get_fixed_ip(context,
                                              floating_ip['fixed_ip_id'])
        if fixed['instance_id'] is not None:
            ec2_instance_id = ec2utils.id_to_ec2_id(fixed['instance_id'])
    address = {'public_ip': floating_ip['address'],
               'instance_id': ec2_instance_id}
    if context.is_admin:
        # Admins additionally see the owning project.
        address['instance_id'] = "%s (%s)" % (address['instance_id'],
                                              floating_ip['project_id'])
    return address

def allocate_address(self, context, **kwargs):
    """EC2 AllocateAddress: reserve a floating IP for the project."""
    LOG.audit(_("Allocate address"), context=context)
    return {'publicIp': self.network_api.allocate_floating_ip(context)}

def release_address(self, context, public_ip, **kwargs):
    """EC2 ReleaseAddress: return a floating IP to the pool."""
    LOG.audit(_("Release address %s"), public_ip, context=context)
    try:
        self.network_api.release_floating_ip(context, address=public_ip)
        return {'return': "true"}
    except exception.FloatingIpNotFound:
        raise exception.EC2APIError(_('Unable to release IP Address.'))

def associate_address(self, context, instance_id, public_ip, **kwargs):
    """EC2 AssociateAddress: attach a floating IP to an instance."""
    LOG.audit(_("Associate address %(public_ip)s to"
                " instance %(instance_id)s") % locals(),
              context=context)
    instance = self.compute_api.get(context,
                                    ec2utils.ec2_id_to_id(instance_id))
    try:
        self.compute_api.associate_floating_ip(context, instance,
                                               address=public_ip)
        return {'return': "true"}
    except exception.FloatingIpNotFound:
        raise exception.EC2APIError(_('Unable to associate IP Address.'))

def disassociate_address(self, context, public_ip, **kwargs):
    """EC2 DisassociateAddress: detach a floating IP."""
    LOG.audit(_("Disassociate address %s"), public_ip, context=context)
    self.network_api.disassociate_floating_ip(context, address=public_ip)
    return {'return': "true"}
def terminate_instances(self, context, instance_id, **kwargs):
    """Terminate each instance in *instance_id*, a list of EC2 ids.

    instance_id is a kwarg mandated by the EC2 API, so its name
    cannot be modified even though it holds a list.
    """
    LOG.debug(_("Going to start terminating instances"))
    previous_states = []
    for ec2_id in instance_id:
        validate_ec2_id(ec2_id)
        target = self.compute_api.get(context,
                                      ec2utils.ec2_id_to_id(ec2_id))
        # Remember the pre-delete state for the response.
        previous_states.append(target)
        self.compute_api.delete(context, target)
    return self._format_terminate_instances(context, instance_id,
                                            previous_states)

def reboot_instances(self, context, instance_id, **kwargs):
    """Hard-reboot each instance in the *instance_id* list."""
    LOG.audit(_("Reboot instance %r"), instance_id, context=context)
    for ec2_id in instance_id:
        validate_ec2_id(ec2_id)
        target = self.compute_api.get(context,
                                      ec2utils.ec2_id_to_id(ec2_id))
        self.compute_api.reboot(context, target, 'HARD')
    return True

def stop_instances(self, context, instance_id, **kwargs):
    """Stop each instance in the *instance_id* list."""
    LOG.debug(_("Going to stop instances"))
    for ec2_id in instance_id:
        validate_ec2_id(ec2_id)
        target = self.compute_api.get(context,
                                      ec2utils.ec2_id_to_id(ec2_id))
        self.compute_api.stop(context, target)
    return True
Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties 
def describe_images(self, context, image_id=None, **kwargs):
    """EC2 DescribeImages; image_id is an optional list of EC2 ids."""
    if image_id:
        found = []
        for ec2_id in image_id:
            try:
                found.append(self._get_image(context, ec2_id))
            except exception.NotFound:
                raise exception.ImageNotFound(image_id=ec2_id)
        images = found
    else:
        images = self.image_service.detail(context)
    return {'imagesSet': [self._format_image(img) for img in images]}

def deregister_image(self, context, image_id, **kwargs):
    """EC2 DeregisterImage: remove an image from the image service."""
    LOG.audit(_("De-registering image %s"), image_id, context=context)
    glance_image = self._get_image(context, image_id)
    self.image_service.delete(context, glance_image['id'])
    return {'imageId': image_id}

def _register_image(self, context, metadata):
    """Create an image from *metadata* and return its EC2 id."""
    created = self.image_service.create(context, metadata)
    kind = ec2utils.image_type(created.get('container_format'))
    return ec2utils.image_ec2_id(created['id'], kind)
kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if not 'user_group' in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care of it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): restart_instance = True self.compute_api.stop(context, instance) # wait instance for really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance with in %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
snapshot = self.volume_api.create_snapshot_force( context, volume, volume['display_name'], volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
./CrossVul/dataset_final_sorted/CWE-20/py/good_3660_0
crossvul-python_data_bad_2816_0
# -*- coding: utf-8 -*- ''' The crypt module manages all of the cryptography functions for minions and masters, encrypting and decrypting payloads, preparing messages, and authenticating peers ''' # Import python libs from __future__ import absolute_import, print_function import os import sys import copy import time import hmac import base64 import hashlib import logging import stat import traceback import binascii import weakref import getpass # Import third party libs import salt.ext.six as six from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin try: from Cryptodome.Cipher import AES, PKCS1_OAEP from Cryptodome.Hash import SHA from Cryptodome.PublicKey import RSA from Cryptodome.Signature import PKCS1_v1_5 import Cryptodome.Random # pylint: disable=W0611 CDOME = True except ImportError: CDOME = False if not CDOME: try: from Crypto.Cipher import AES, PKCS1_OAEP from Crypto.Hash import SHA from Crypto.PublicKey import RSA from Crypto.Signature import PKCS1_v1_5 # let this be imported, if possible import Crypto.Random # pylint: disable=W0611 except ImportError: # No need for crypt in local mode pass # Import salt libs import salt.defaults.exitcodes import salt.utils import salt.utils.decorators import salt.payload import salt.transport.client import salt.transport.frame import salt.utils.rsax931 import salt.utils.verify import salt.version from salt.exceptions import ( AuthenticationError, SaltClientError, SaltReqTimeoutError ) import tornado.gen log = logging.getLogger(__name__) def dropfile(cachedir, user=None): ''' Set an AES dropfile to request the master update the publish session key ''' dfn = os.path.join(cachedir, '.dfn') # set a mask (to avoid a race condition on file creation) and store original. 
mask = os.umask(191) try: log.info('Rotating AES key') if os.path.isfile(dfn): log.info('AES key rotation already requested') return if os.path.isfile(dfn) and not os.access(dfn, os.W_OK): os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) with salt.utils.fopen(dfn, 'wb+') as fp_: fp_.write(b'') os.chmod(dfn, stat.S_IRUSR) if user: try: import pwd uid = pwd.getpwnam(user).pw_uid os.chown(dfn, uid, -1) except (KeyError, ImportError, OSError, IOError): pass finally: os.umask(mask) # restore original umask def gen_keys(keydir, keyname, keysize, user=None): ''' Generate a RSA public keypair for use with salt :param str keydir: The directory to write the keypair to :param str keyname: The type of salt server for whom this key should be written. (i.e. 'master' or 'minion') :param int keysize: The number of bits in the key :param str user: The user on the system who should own this keypair :rtype: str :return: Path on the filesystem to the RSA private key ''' base = os.path.join(keydir, keyname) priv = '{0}.pem'.format(base) pub = '{0}.pub'.format(base) salt.utils.reinit_crypto() gen = RSA.generate(bits=keysize, e=65537) if os.path.isfile(priv): # Between first checking and the generation another process has made # a key! Use the winner's key return priv # Do not try writing anything, if directory has no permissions. 
if not os.access(keydir, os.W_OK): raise IOError('Write access denied to "{0}" for user "{1}".'.format(os.path.abspath(keydir), getpass.getuser())) cumask = os.umask(191) with salt.utils.fopen(priv, 'wb+') as f: f.write(gen.exportKey('PEM')) os.umask(cumask) with salt.utils.fopen(pub, 'wb+') as f: f.write(gen.publickey().exportKey('PEM')) os.chmod(priv, 256) if user: try: import pwd uid = pwd.getpwnam(user).pw_uid os.chown(priv, uid, -1) os.chown(pub, uid, -1) except (KeyError, ImportError, OSError): # The specified user was not found, allow the backup systems to # report the error pass return priv @salt.utils.decorators.memoize def _get_key_with_evict(path, timestamp): ''' Load a key from disk. `timestamp` above is intended to be the timestamp of the file's last modification. This fn is memoized so if it is called with the same path and timestamp (the file's last modified time) the second time the result is returned from the memoiziation. If the file gets modified then the params are different and the key is loaded from disk. ''' log.debug('salt.crypt._get_key_with_evict: Loading private key') with salt.utils.fopen(path) as f: key = RSA.importKey(f.read()) return key def _get_rsa_key(path): ''' Read a key off the disk. Poor man's simple cache in effect here, we memoize the result of calling _get_rsa_with_evict. This means the first time _get_key_with_evict is called with a path and a timestamp the result is cached. If the file (the private key) does not change then its timestamp will not change and the next time the result is returned from the cache. If the key DOES change the next time _get_rsa_with_evict is called it is called with different parameters and the fn is run fully to retrieve the key from disk. ''' log.debug('salt.crypt._get_rsa_key: Loading private key') return _get_key_with_evict(path, str(os.path.getmtime(path))) def sign_message(privkey_path, message): ''' Use Crypto.Signature.PKCS1_v1_5 to sign a message. Returns the signature. 
''' key = _get_rsa_key(privkey_path) log.debug('salt.crypt.sign_message: Signing message.') signer = PKCS1_v1_5.new(key) return signer.sign(SHA.new(message)) def verify_signature(pubkey_path, message, signature): ''' Use Crypto.Signature.PKCS1_v1_5 to verify the signature on a message. Returns True for valid signature. ''' log.debug('salt.crypt.verify_signature: Loading public key') with salt.utils.fopen(pubkey_path) as f: pubkey = RSA.importKey(f.read()) log.debug('salt.crypt.verify_signature: Verifying signature') verifier = PKCS1_v1_5.new(pubkey) return verifier.verify(SHA.new(message), signature) def gen_signature(priv_path, pub_path, sign_path): ''' creates a signature for the given public-key with the given private key and writes it to sign_path ''' with salt.utils.fopen(pub_path) as fp_: mpub_64 = fp_.read() mpub_sig = sign_message(priv_path, mpub_64) mpub_sig_64 = binascii.b2a_base64(mpub_sig) if os.path.isfile(sign_path): return False log.trace('Calculating signature for {0} with {1}' .format(os.path.basename(pub_path), os.path.basename(priv_path))) if os.path.isfile(sign_path): log.trace('Signature file {0} already exists, please ' 'remove it first and try again'.format(sign_path)) else: with salt.utils.fopen(sign_path, 'wb+') as sig_f: sig_f.write(salt.utils.to_bytes(mpub_sig_64)) log.trace('Wrote signature to {0}'.format(sign_path)) return True def private_encrypt(key, message): ''' Generate an M2Crypto-compatible signature :param Crypto.PublicKey.RSA._RSAobj key: The RSA key object :param str message: The message to sign :rtype: str :return: The signature, or an empty string if the signature operation failed ''' signer = salt.utils.rsax931.RSAX931Signer(key.exportKey('PEM')) return signer.sign(message) def public_decrypt(pub, message): ''' Verify an M2Crypto-compatible signature :param Crypto.PublicKey.RSA._RSAobj key: The RSA public key object :param str message: The signed message to verify :rtype: str :return: The message (or digest) recovered from 
the signature, or an empty string if the verification failed ''' verifier = salt.utils.rsax931.RSAX931Verifier(pub.exportKey('PEM')) return verifier.verify(message) class MasterKeys(dict): ''' The Master Keys class is used to manage the RSA public key pair used for authentication by the master. It also generates a signing key-pair if enabled with master_sign_key_name. ''' def __init__(self, opts): super(MasterKeys, self).__init__() self.opts = opts self.pub_path = os.path.join(self.opts['pki_dir'], 'master.pub') self.rsa_path = os.path.join(self.opts['pki_dir'], 'master.pem') self.key = self.__get_keys() self.pub_signature = None # set names for the signing key-pairs if opts['master_sign_pubkey']: # if only the signature is available, use that if opts['master_use_pubkey_signature']: self.sig_path = os.path.join(self.opts['pki_dir'], opts['master_pubkey_signature']) if os.path.isfile(self.sig_path): with salt.utils.fopen(self.sig_path) as fp_: self.pub_signature = fp_.read() log.info('Read {0}\'s signature from {1}' ''.format(os.path.basename(self.pub_path), self.opts['master_pubkey_signature'])) else: log.error('Signing the master.pub key with a signature is enabled ' 'but no signature file found at the defined location ' '{0}'.format(self.sig_path)) log.error('The signature-file may be either named differently ' 'or has to be created with \'salt-key --gen-signature\'') sys.exit(1) # create a new signing key-pair to sign the masters # auth-replies when a minion tries to connect else: self.pub_sign_path = os.path.join(self.opts['pki_dir'], opts['master_sign_key_name'] + '.pub') self.rsa_sign_path = os.path.join(self.opts['pki_dir'], opts['master_sign_key_name'] + '.pem') self.sign_key = self.__get_keys(name=opts['master_sign_key_name']) # We need __setstate__ and __getstate__ to avoid pickling errors since # some of the member variables correspond to Cython objects which are # not picklable. 
# These methods are only used when pickling so will not be used on # non-Windows platforms. def __setstate__(self, state): self.__init__(state['opts']) def __getstate__(self): return {'opts': self.opts} def __get_keys(self, name='master'): ''' Returns a key object for a key in the pki-dir ''' path = os.path.join(self.opts['pki_dir'], name + '.pem') if os.path.exists(path): with salt.utils.fopen(path) as f: key = RSA.importKey(f.read()) log.debug('Loaded {0} key: {1}'.format(name, path)) else: log.info('Generating {0} keys: {1}'.format(name, self.opts['pki_dir'])) gen_keys(self.opts['pki_dir'], name, self.opts['keysize'], self.opts.get('user')) with salt.utils.fopen(self.rsa_path) as f: key = RSA.importKey(f.read()) return key def get_pub_str(self, name='master'): ''' Return the string representation of a public key in the pki-directory ''' path = os.path.join(self.opts['pki_dir'], name + '.pub') if not os.path.isfile(path): key = self.__get_keys() with salt.utils.fopen(path, 'wb+') as wfh: wfh.write(key.publickey().exportKey('PEM')) with salt.utils.fopen(path) as rfh: return rfh.read() def get_mkey_paths(self): return self.pub_path, self.rsa_path def get_sign_paths(self): return self.pub_sign_path, self.rsa_sign_path def pubkey_signature(self): ''' returns the base64 encoded signature from the signature file or None if the master has its own signing keys ''' return self.pub_signature class AsyncAuth(object): ''' Set up an Async object to maintain authentication with the salt master ''' # This class is only a singleton per minion/master pair # mapping of io_loop -> {key -> auth} instance_map = weakref.WeakKeyDictionary() # mapping of key -> creds creds_map = {} def __new__(cls, opts, io_loop=None): ''' Only create one instance of AsyncAuth per __key() ''' # do we have any mapping for this io_loop io_loop = io_loop or tornado.ioloop.IOLoop.current() if io_loop not in AsyncAuth.instance_map: AsyncAuth.instance_map[io_loop] = weakref.WeakValueDictionary() 
loop_instance_map = AsyncAuth.instance_map[io_loop] key = cls.__key(opts) auth = loop_instance_map.get(key) if auth is None: log.debug('Initializing new AsyncAuth for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller auth = object.__new__(cls) auth.__singleton_init__(opts, io_loop=io_loop) loop_instance_map[key] = auth else: log.debug('Re-using AsyncAuth for {0}'.format(key)) return auth @classmethod def __key(cls, opts, io_loop=None): return (opts['pki_dir'], # where the keys are stored opts['id'], # minion ID opts['master_uri'], # master ID ) # has to remain empty for singletons, since __init__ will *always* be called def __init__(self, opts, io_loop=None): pass # an init for the singleton instance to call def __singleton_init__(self, opts, io_loop=None): ''' Init an Auth instance :param dict opts: Options for this server :return: Auth instance :rtype: Auth ''' self.opts = opts if six.PY2: self.token = Crypticle.generate_key_string() else: self.token = salt.utils.to_bytes(Crypticle.generate_key_string()) self.serial = salt.payload.Serial(self.opts) self.pub_path = os.path.join(self.opts['pki_dir'], 'minion.pub') self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem') if self.opts['__role'] == 'syndic': self.mpub = 'syndic_master.pub' else: self.mpub = 'minion_master.pub' if not os.path.isfile(self.pub_path): self.get_keys() self.io_loop = io_loop or tornado.ioloop.IOLoop.current() salt.utils.reinit_crypto() key = self.__key(self.opts) # TODO: if we already have creds for this key, lets just re-use if key in AsyncAuth.creds_map: creds = AsyncAuth.creds_map[key] self._creds = creds self._crypticle = Crypticle(self.opts, creds['aes']) self._authenticate_future = tornado.concurrent.Future() self._authenticate_future.set_result(True) else: self.authenticate() def __deepcopy__(self, 
memo): cls = self.__class__ result = cls.__new__(cls, copy.deepcopy(self.opts, memo), io_loop=None) memo[id(self)] = result for key in self.__dict__: if key in ('io_loop',): # The io_loop has a thread Lock which will fail to be deep # copied. Skip it because it will just be recreated on the # new copy. continue setattr(result, key, copy.deepcopy(self.__dict__[key], memo)) return result @property def creds(self): return self._creds @property def crypticle(self): return self._crypticle @property def authenticated(self): return hasattr(self, '_authenticate_future') and \ self._authenticate_future.done() and \ self._authenticate_future.exception() is None def invalidate(self): if self.authenticated: del self._authenticate_future key = self.__key(self.opts) if key in AsyncAuth.creds_map: del AsyncAuth.creds_map[key] def authenticate(self, callback=None): ''' Ask for this client to reconnect to the origin This function will de-dupe all calls here and return a *single* future for the sign-in-- whis way callers can all assume there aren't others ''' # if an auth is in flight-- and not done-- just pass that back as the future to wait on if hasattr(self, '_authenticate_future') and not self._authenticate_future.done(): future = self._authenticate_future else: future = tornado.concurrent.Future() self._authenticate_future = future self.io_loop.add_callback(self._authenticate) if callback is not None: def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) return future @tornado.gen.coroutine def _authenticate(self): ''' Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master AES key. 
:rtype: Crypticle :returns: A crypticle used for encryption operations ''' acceptance_wait_time = self.opts['acceptance_wait_time'] acceptance_wait_time_max = self.opts['acceptance_wait_time_max'] if not acceptance_wait_time_max: acceptance_wait_time_max = acceptance_wait_time creds = None channel = salt.transport.client.AsyncReqChannel.factory(self.opts, crypt='clear', io_loop=self.io_loop) error = None while True: try: creds = yield self.sign_in(channel=channel) except SaltClientError as exc: error = exc break if creds == 'retry': if self.opts.get('detect_mode') is True: error = SaltClientError('Detect mode is on') break if self.opts.get('caller'): print('Minion failed to authenticate with the master, ' 'has the minion key been accepted?') sys.exit(2) if acceptance_wait_time: log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time)) yield tornado.gen.sleep(acceptance_wait_time) if acceptance_wait_time < acceptance_wait_time_max: acceptance_wait_time += acceptance_wait_time log.debug('Authentication wait time is {0}'.format(acceptance_wait_time)) continue break if not isinstance(creds, dict) or 'aes' not in creds: if self.opts.get('detect_mode') is True: error = SaltClientError('-|RETRY|-') try: del AsyncAuth.creds_map[self.__key(self.opts)] except KeyError: pass if not error: error = SaltClientError('Attempt to authenticate with the salt master failed') self._authenticate_future.set_exception(error) else: key = self.__key(self.opts) AsyncAuth.creds_map[key] = creds self._creds = creds self._crypticle = Crypticle(self.opts, creds['aes']) self._authenticate_future.set_result(True) # mark the sign-in as complete # Notify the bus about creds change event = salt.utils.event.get_event(self.opts.get('__role'), opts=self.opts, listen=False) event.fire_event({'key': key, 'creds': creds}, salt.utils.event.tagify(prefix='auth', suffix='creds')) @tornado.gen.coroutine def sign_in(self, timeout=60, safe=True, tries=1, channel=None): ''' Send a sign in request 
to the master, sets the key information and returns a dict containing the master publish interface to bind to and the decrypted aes key for transport decryption. :param int timeout: Number of seconds to wait before timing out the sign-in request :param bool safe: If True, do not raise an exception on timeout. Retry instead. :param int tries: The number of times to try to authenticate before giving up. :raises SaltReqTimeoutError: If the sign-in request has timed out and :param safe: is not set :return: Return a string on failure indicating the reason for failure. On success, return a dictionary with the publication port and the shared AES key. ''' auth = {} auth_timeout = self.opts.get('auth_timeout', None) if auth_timeout is not None: timeout = auth_timeout auth_safemode = self.opts.get('auth_safemode', None) if auth_safemode is not None: safe = auth_safemode auth_tries = self.opts.get('auth_tries', None) if auth_tries is not None: tries = auth_tries m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) auth['master_uri'] = self.opts['master_uri'] if not channel: channel = salt.transport.client.AsyncReqChannel.factory(self.opts, crypt='clear', io_loop=self.io_loop) sign_in_payload = self.minion_sign_in_payload() try: payload = yield channel.send( sign_in_payload, tries=tries, timeout=timeout ) except SaltReqTimeoutError as e: if safe: log.warning('SaltReqTimeoutError: {0}'.format(e)) raise tornado.gen.Return('retry') if self.opts.get('detect_mode') is True: raise tornado.gen.Return('retry') else: raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error') if 'load' in payload: if 'ret' in payload['load']: if not payload['load']['ret']: if self.opts['rejected_retry']: log.error( 'The Salt Master has rejected this minion\'s public ' 'key.\nTo repair this issue, delete the public key ' 'for this minion on the Salt Master.\nThe Salt ' 'Minion will attempt to to re-authenicate.' 
) raise tornado.gen.Return('retry') else: log.critical( 'The Salt Master has rejected this minion\'s public ' 'key!\nTo repair this issue, delete the public key ' 'for this minion on the Salt Master and restart this ' 'minion.\nOr restart the Salt Master in open mode to ' 'clean out the keys. The Salt Minion will now exit.' ) sys.exit(salt.defaults.exitcodes.EX_OK) # has the master returned that its maxed out with minions? elif payload['load']['ret'] == 'full': raise tornado.gen.Return('full') else: log.error( 'The Salt Master has cached the public key for this ' 'node, this salt minion will wait for {0} seconds ' 'before attempting to re-authenticate'.format( self.opts['acceptance_wait_time'] ) ) raise tornado.gen.Return('retry') auth['aes'] = self.verify_master(payload, master_pub='token' in sign_in_payload) if not auth['aes']: log.critical( 'The Salt Master server\'s public key did not authenticate!\n' 'The master may need to be updated if it is a version of Salt ' 'lower than {0}, or\n' 'If you are confident that you are connecting to a valid Salt ' 'Master, then remove the master public key and restart the ' 'Salt Minion.\nThe master public key can be found ' 'at:\n{1}'.format(salt.version.__version__, m_pub_fn) ) raise SaltClientError('Invalid master key') if self.opts.get('syndic_master', False): # Is syndic syndic_finger = self.opts.get('syndic_finger', self.opts.get('master_finger', False)) if syndic_finger: if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != syndic_finger: self._finger_fail(syndic_finger, m_pub_fn) else: if self.opts.get('master_finger', False): if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != self.opts['master_finger']: self._finger_fail(self.opts['master_finger'], m_pub_fn) auth['publish_port'] = payload['publish_port'] raise tornado.gen.Return(auth) def get_keys(self): ''' Return keypair object for the minion. 
:rtype: Crypto.PublicKey.RSA._RSAobj :return: The RSA keypair ''' # Make sure all key parent directories are accessible user = self.opts.get('user', 'root') salt.utils.verify.check_path_traversal(self.opts['pki_dir'], user) if os.path.exists(self.rsa_path): with salt.utils.fopen(self.rsa_path) as f: key = RSA.importKey(f.read()) log.debug('Loaded minion key: {0}'.format(self.rsa_path)) else: log.info('Generating keys: {0}'.format(self.opts['pki_dir'])) gen_keys(self.opts['pki_dir'], 'minion', self.opts['keysize'], self.opts.get('user')) with salt.utils.fopen(self.rsa_path) as f: key = RSA.importKey(f.read()) return key def gen_token(self, clear_tok): ''' Encrypt a string with the minion private key to verify identity with the master. :param str clear_tok: A plaintext token to encrypt :return: Encrypted token :rtype: str ''' return private_encrypt(self.get_keys(), clear_tok) def minion_sign_in_payload(self): ''' Generates the payload used to authenticate with the master server. This payload consists of the passed in id_ and the ssh public key to encrypt the AES key sent back from the master. :return: Payload dictionary :rtype: dict ''' payload = {} payload['cmd'] = '_auth' payload['id'] = self.opts['id'] try: pubkey_path = os.path.join(self.opts['pki_dir'], self.mpub) with salt.utils.fopen(pubkey_path) as f: pub = RSA.importKey(f.read()) cipher = PKCS1_OAEP.new(pub) payload['token'] = cipher.encrypt(self.token) except Exception: pass with salt.utils.fopen(self.pub_path) as f: payload['pub'] = f.read() return payload def decrypt_aes(self, payload, master_pub=True): ''' This function is used to decrypt the AES seed phrase returned from the master server. The seed phrase is decrypted with the SSH RSA host key. Pass in the encrypted AES key. Returns the decrypted AES seed key, a string :param dict payload: The incoming payload. This is a dictionary which may have the following keys: 'aes': The shared AES key 'enc': The format of the message. 
('clear', 'pub', etc) 'sig': The message signature 'publish_port': The TCP port which published the message 'token': The encrypted token used to verify the message. 'pub_key': The public key of the sender. :rtype: str :return: The decrypted token that was provided, with padding. :rtype: str :return: The decrypted AES seed key ''' if self.opts.get('auth_trb', False): log.warning( 'Auth Called: {0}'.format( ''.join(traceback.format_stack()) ) ) else: log.debug('Decrypting the current master AES key') key = self.get_keys() cipher = PKCS1_OAEP.new(key) key_str = cipher.decrypt(payload['aes']) if 'sig' in payload: m_path = os.path.join(self.opts['pki_dir'], self.mpub) if os.path.exists(m_path): try: with salt.utils.fopen(m_path) as f: mkey = RSA.importKey(f.read()) except Exception: return '', '' digest = hashlib.sha256(key_str).hexdigest() if six.PY3: digest = salt.utils.to_bytes(digest) m_digest = public_decrypt(mkey.publickey(), payload['sig']) if m_digest != digest: return '', '' else: return '', '' if six.PY3: key_str = salt.utils.to_str(key_str) if '_|-' in key_str: return key_str.split('_|-') else: if 'token' in payload: token = cipher.decrypt(payload['token']) return key_str, token elif not master_pub: return key_str, '' return '', '' def verify_pubkey_sig(self, message, sig): ''' Wraps the verify_signature method so we have additional checks. :rtype: bool :return: Success or failure of public key verification ''' if self.opts['master_sign_key_name']: path = os.path.join(self.opts['pki_dir'], self.opts['master_sign_key_name'] + '.pub') if os.path.isfile(path): res = verify_signature(path, message, binascii.a2b_base64(sig)) else: log.error('Verification public key {0} does not exist. 
You ' 'need to copy it from the master to the minions ' 'pki directory'.format(os.path.basename(path))) return False if res: log.debug('Successfully verified signature of master ' 'public key with verification public key ' '{0}'.format(self.opts['master_sign_key_name'] + '.pub')) return True else: log.debug('Failed to verify signature of public key') return False else: log.error('Failed to verify the signature of the message because ' 'the verification key-pairs name is not defined. Please ' 'make sure that master_sign_key_name is defined.') return False def verify_signing_master(self, payload): try: if self.verify_pubkey_sig(payload['pub_key'], payload['pub_sig']): log.info('Received signed and verified master pubkey ' 'from master {0}'.format(self.opts['master'])) m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) uid = salt.utils.get_uid(self.opts.get('user', None)) with salt.utils.fpopen(m_pub_fn, 'wb+', uid=uid) as wfh: wfh.write(salt.utils.to_bytes(payload['pub_key'])) return True else: log.error('Received signed public-key from master {0} ' 'but signature verification failed!'.format(self.opts['master'])) return False except Exception as sign_exc: log.error('There was an error while verifying the masters public-key signature') raise Exception(sign_exc) def check_auth_deps(self, payload): ''' Checks if both master and minion either sign (master) and verify (minion). If one side does not, it should fail. :param dict payload: The incoming payload. This is a dictionary which may have the following keys: 'aes': The shared AES key 'enc': The format of the message. ('clear', 'pub', 'aes') 'publish_port': The TCP port which published the message 'token': The encrypted token used to verify the message. 'pub_key': The RSA public key of the sender. 
''' # master and minion sign and verify if 'pub_sig' in payload and self.opts['verify_master_pubkey_sign']: return True # master and minion do NOT sign and do NOT verify elif 'pub_sig' not in payload and not self.opts['verify_master_pubkey_sign']: return True # master signs, but minion does NOT verify elif 'pub_sig' in payload and not self.opts['verify_master_pubkey_sign']: log.error('The masters sent its public-key signature, but signature ' 'verification is not enabled on the minion. Either enable ' 'signature verification on the minion or disable signing ' 'the public key on the master!') return False # master does NOT sign but minion wants to verify elif 'pub_sig' not in payload and self.opts['verify_master_pubkey_sign']: log.error('The master did not send its public-key signature, but ' 'signature verification is enabled on the minion. Either ' 'disable signature verification on the minion or enable ' 'signing the public on the master!') return False def extract_aes(self, payload, master_pub=True): ''' Return the AES key received from the master after the minion has been successfully authenticated. :param dict payload: The incoming payload. This is a dictionary which may have the following keys: 'aes': The shared AES key 'enc': The format of the message. ('clear', 'pub', etc) 'publish_port': The TCP port which published the message 'token': The encrypted token used to verify the message. 'pub_key': The RSA public key of the sender. :rtype: str :return: The shared AES key received from the master. 
''' if master_pub: try: aes, token = self.decrypt_aes(payload, master_pub) if token != self.token: log.error( 'The master failed to decrypt the random minion token' ) return '' except Exception: log.error( 'The master failed to decrypt the random minion token' ) return '' return aes else: aes, token = self.decrypt_aes(payload, master_pub) return aes def verify_master(self, payload, master_pub=True): ''' Verify that the master is the same one that was previously accepted. :param dict payload: The incoming payload. This is a dictionary which may have the following keys: 'aes': The shared AES key 'enc': The format of the message. ('clear', 'pub', etc) 'publish_port': The TCP port which published the message 'token': The encrypted token used to verify the message. 'pub_key': The RSA public key of the sender. :param bool master_pub: Operate as if minion had no master pubkey when it sent auth request, i.e. don't verify the minion signature :rtype: str :return: An empty string on verification failure. On success, the decrypted AES message in the payload. 
''' m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) m_pub_exists = os.path.isfile(m_pub_fn) if m_pub_exists and master_pub and not self.opts['open_mode']: with salt.utils.fopen(m_pub_fn) as fp_: local_master_pub = fp_.read() if payload['pub_key'].replace('\n', '').replace('\r', '') != \ local_master_pub.replace('\n', '').replace('\r', ''): if not self.check_auth_deps(payload): return '' if self.opts['verify_master_pubkey_sign']: if self.verify_signing_master(payload): return self.extract_aes(payload, master_pub=False) else: return '' else: # This is not the last master we connected to log.error('The master key has changed, the salt master could ' 'have been subverted, verify salt master\'s public ' 'key') return '' else: if not self.check_auth_deps(payload): return '' # verify the signature of the pubkey even if it has # not changed compared with the one we already have if self.opts['always_verify_signature']: if self.verify_signing_master(payload): return self.extract_aes(payload) else: log.error('The masters public could not be verified. Is the ' 'verification pubkey {0} up to date?' 
''.format(self.opts['master_sign_key_name'] + '.pub')) return '' else: return self.extract_aes(payload) else: if not self.check_auth_deps(payload): return '' # verify the masters pubkey signature if the minion # has not received any masters pubkey before if self.opts['verify_master_pubkey_sign']: if self.verify_signing_master(payload): return self.extract_aes(payload, master_pub=False) else: return '' else: if not m_pub_exists: # the minion has not received any masters pubkey yet, write # the newly received pubkey to minion_master.pub with salt.utils.fopen(m_pub_fn, 'wb+') as fp_: fp_.write(salt.utils.to_bytes(payload['pub_key'])) return self.extract_aes(payload, master_pub=False) def _finger_fail(self, finger, master_key): log.critical( 'The specified fingerprint in the master configuration ' 'file:\n{0}\nDoes not match the authenticating master\'s ' 'key:\n{1}\nVerify that the configured fingerprint ' 'matches the fingerprint of the correct master and that ' 'this minion is not subject to a man-in-the-middle attack.' 
.format( finger, salt.utils.pem_finger(master_key, sum_type=self.opts['hash_type']) ) ) sys.exit(42) # TODO: remove, we should just return a sync wrapper of AsyncAuth class SAuth(AsyncAuth): ''' Set up an object to maintain authentication with the salt master ''' # This class is only a singleton per minion/master pair instances = weakref.WeakValueDictionary() def __new__(cls, opts, io_loop=None): ''' Only create one instance of SAuth per __key() ''' key = cls.__key(opts) auth = SAuth.instances.get(key) if auth is None: log.debug('Initializing new SAuth for {0}'.format(key)) auth = object.__new__(cls) auth.__singleton_init__(opts) SAuth.instances[key] = auth else: log.debug('Re-using SAuth for {0}'.format(key)) return auth @classmethod def __key(cls, opts, io_loop=None): return (opts['pki_dir'], # where the keys are stored opts['id'], # minion ID opts['master_uri'], # master ID ) # has to remain empty for singletons, since __init__ will *always* be called def __init__(self, opts, io_loop=None): super(SAuth, self).__init__(opts, io_loop=io_loop) # an init for the singleton instance to call def __singleton_init__(self, opts, io_loop=None): ''' Init an Auth instance :param dict opts: Options for this server :return: Auth instance :rtype: Auth ''' self.opts = opts if six.PY2: self.token = Crypticle.generate_key_string() else: self.token = salt.utils.to_bytes(Crypticle.generate_key_string()) self.serial = salt.payload.Serial(self.opts) self.pub_path = os.path.join(self.opts['pki_dir'], 'minion.pub') self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem') if 'syndic_master' in self.opts: self.mpub = 'syndic_master.pub' elif 'alert_master' in self.opts: self.mpub = 'monitor_master.pub' else: self.mpub = 'minion_master.pub' if not os.path.isfile(self.pub_path): self.get_keys() @property def creds(self): if not hasattr(self, '_creds'): self.authenticate() return self._creds @property def crypticle(self): if not hasattr(self, '_crypticle'): self.authenticate() return 
self._crypticle def authenticate(self, _=None): # TODO: remove unused var ''' Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master AES key. :rtype: Crypticle :returns: A crypticle used for encryption operations ''' acceptance_wait_time = self.opts['acceptance_wait_time'] acceptance_wait_time_max = self.opts['acceptance_wait_time_max'] channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear') if not acceptance_wait_time_max: acceptance_wait_time_max = acceptance_wait_time while True: creds = self.sign_in(channel=channel) if creds == 'retry': if self.opts.get('caller'): print('Minion failed to authenticate with the master, ' 'has the minion key been accepted?') sys.exit(2) if acceptance_wait_time: log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time)) time.sleep(acceptance_wait_time) if acceptance_wait_time < acceptance_wait_time_max: acceptance_wait_time += acceptance_wait_time log.debug('Authentication wait time is {0}'.format(acceptance_wait_time)) continue break self._creds = creds self._crypticle = Crypticle(self.opts, creds['aes']) def sign_in(self, timeout=60, safe=True, tries=1, channel=None): ''' Send a sign in request to the master, sets the key information and returns a dict containing the master publish interface to bind to and the decrypted aes key for transport decryption. :param int timeout: Number of seconds to wait before timing out the sign-in request :param bool safe: If True, do not raise an exception on timeout. Retry instead. :param int tries: The number of times to try to authenticate before giving up. :raises SaltReqTimeoutError: If the sign-in request has timed out and :param safe: is not set :return: Return a string on failure indicating the reason for failure. On success, return a dictionary with the publication port and the shared AES key. 
''' auth = {} auth_timeout = self.opts.get('auth_timeout', None) if auth_timeout is not None: timeout = auth_timeout auth_safemode = self.opts.get('auth_safemode', None) if auth_safemode is not None: safe = auth_safemode auth_tries = self.opts.get('auth_tries', None) if auth_tries is not None: tries = auth_tries m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) auth['master_uri'] = self.opts['master_uri'] if not channel: channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear') sign_in_payload = self.minion_sign_in_payload() try: payload = channel.send( sign_in_payload, tries=tries, timeout=timeout ) except SaltReqTimeoutError as e: if safe: log.warning('SaltReqTimeoutError: {0}'.format(e)) return 'retry' raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error') if 'load' in payload: if 'ret' in payload['load']: if not payload['load']['ret']: if self.opts['rejected_retry']: log.error( 'The Salt Master has rejected this minion\'s public ' 'key.\nTo repair this issue, delete the public key ' 'for this minion on the Salt Master.\nThe Salt ' 'Minion will attempt to to re-authenicate.' ) return 'retry' else: log.critical( 'The Salt Master has rejected this minion\'s public ' 'key!\nTo repair this issue, delete the public key ' 'for this minion on the Salt Master and restart this ' 'minion.\nOr restart the Salt Master in open mode to ' 'clean out the keys. The Salt Minion will now exit.' ) sys.exit(salt.defaults.exitcodes.EX_OK) # has the master returned that its maxed out with minions? elif payload['load']['ret'] == 'full': return 'full' else: log.error( 'The Salt Master has cached the public key for this ' 'node. If this is the first time connecting to this master ' 'then this key may need to be accepted using \'salt-key -a {0}\' on ' 'the salt master. 
This salt minion will wait for {1} seconds ' 'before attempting to re-authenticate.'.format( self.opts['id'], self.opts['acceptance_wait_time'] ) ) return 'retry' auth['aes'] = self.verify_master(payload, master_pub='token' in sign_in_payload) if not auth['aes']: log.critical( 'The Salt Master server\'s public key did not authenticate!\n' 'The master may need to be updated if it is a version of Salt ' 'lower than {0}, or\n' 'If you are confident that you are connecting to a valid Salt ' 'Master, then remove the master public key and restart the ' 'Salt Minion.\nThe master public key can be found ' 'at:\n{1}'.format(salt.version.__version__, m_pub_fn) ) sys.exit(42) if self.opts.get('syndic_master', False): # Is syndic syndic_finger = self.opts.get('syndic_finger', self.opts.get('master_finger', False)) if syndic_finger: if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != syndic_finger: self._finger_fail(syndic_finger, m_pub_fn) else: if self.opts.get('master_finger', False): if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != self.opts['master_finger']: self._finger_fail(self.opts['master_finger'], m_pub_fn) auth['publish_port'] = payload['publish_port'] return auth class Crypticle(object): ''' Authenticated encryption class Encryption algorithm: AES-CBC Signing algorithm: HMAC-SHA256 ''' PICKLE_PAD = b'pickle::' AES_BLOCK_SIZE = 16 SIG_SIZE = hashlib.sha256().digest_size def __init__(self, opts, key_string, key_size=192): self.key_string = key_string self.keys = self.extract_keys(self.key_string, key_size) self.key_size = key_size self.serial = salt.payload.Serial(opts) @classmethod def generate_key_string(cls, key_size=192): key = os.urandom(key_size // 8 + cls.SIG_SIZE) b64key = base64.b64encode(key) if six.PY3: b64key = b64key.decode('utf-8') return b64key.replace('\n', '') @classmethod def extract_keys(cls, key_string, key_size): if six.PY2: key = key_string.decode('base64') else: key = 
salt.utils.to_bytes(base64.b64decode(key_string)) assert len(key) == key_size / 8 + cls.SIG_SIZE, 'invalid key' return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:] def encrypt(self, data): ''' encrypt data with AES-CBC and sign it with HMAC-SHA256 ''' aes_key, hmac_key = self.keys pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE if six.PY2: data = data + pad * chr(pad) else: data = data + salt.utils.to_bytes(pad * chr(pad)) iv_bytes = os.urandom(self.AES_BLOCK_SIZE) cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = iv_bytes + cypher.encrypt(data) sig = hmac.new(hmac_key, data, hashlib.sha256).digest() return data + sig def decrypt(self, data): ''' verify HMAC-SHA256 signature and decrypt data with AES-CBC ''' aes_key, hmac_key = self.keys sig = data[-self.SIG_SIZE:] data = data[:-self.SIG_SIZE] if six.PY3 and not isinstance(data, bytes): data = salt.utils.to_bytes(data) mac_bytes = hmac.new(hmac_key, data, hashlib.sha256).digest() if len(mac_bytes) != len(sig): log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') result = 0 if six.PY2: for zipped_x, zipped_y in zip(mac_bytes, sig): result |= ord(zipped_x) ^ ord(zipped_y) else: for zipped_x, zipped_y in zip(mac_bytes, sig): result |= zipped_x ^ zipped_y if result != 0: log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') iv_bytes = data[:self.AES_BLOCK_SIZE] data = data[self.AES_BLOCK_SIZE:] cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = cypher.decrypt(data) if six.PY2: return data[:-ord(data[-1])] else: return data[:-data[-1]] def dumps(self, obj): ''' Serialize and encrypt a python object ''' return self.encrypt(self.PICKLE_PAD + self.serial.dumps(obj)) def loads(self, data, raw=False): ''' Decrypt and un-serialize a python object ''' data = self.decrypt(data) # simple integrity check to verify that we got meaningful data if not data.startswith(self.PICKLE_PAD): return {} load = 
self.serial.loads(data[len(self.PICKLE_PAD):], raw=raw) return load
./CrossVul/dataset_final_sorted/CWE-20/py/bad_2816_0
crossvul-python_data_bad_872_1
# -*- test-case-name: twisted.web.test.test_webclient,twisted.web.test.test_agent -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ HTTP client. """ from __future__ import division, absolute_import import os import collections import warnings try: from urlparse import urlunparse, urljoin, urldefrag except ImportError: from urllib.parse import urljoin, urldefrag from urllib.parse import urlunparse as _urlunparse def urlunparse(parts): result = _urlunparse(tuple([p.decode("charmap") for p in parts])) return result.encode("charmap") import zlib from functools import wraps from zope.interface import implementer from twisted.python.compat import _PY3, networkString from twisted.python.compat import nativeString, intToBytes, unicode, itervalues from twisted.python.deprecate import deprecatedModuleAttribute, deprecated from twisted.python.failure import Failure from incremental import Version from twisted.web.iweb import IPolicyForHTTPS, IAgentEndpointFactory from twisted.python.deprecate import getDeprecationWarningString from twisted.web import http from twisted.internet import defer, protocol, task, reactor from twisted.internet.abstract import isIPv6Address from twisted.internet.interfaces import IProtocol, IOpenSSLContextFactory from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.python.util import InsensitiveDict from twisted.python.components import proxyForInterface from twisted.web import error from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse from twisted.web.http_headers import Headers from twisted.logger import Logger class PartialDownloadError(error.Error): """ Page was only partially downloaded, we got disconnected in middle. @ivar response: All of the response body which was downloaded. """ class HTTPPageGetter(http.HTTPClient): """ Gets a resource via HTTP, then quits. Typically used with L{HTTPClientFactory}. 
Note that this class does not, by itself, do anything with the response. If you want to download a resource into a file, use L{HTTPPageDownloader} instead. @ivar _completelyDone: A boolean indicating whether any further requests are necessary after this one completes in order to provide a result to C{self.factory.deferred}. If it is C{False}, then a redirect is going to be followed. Otherwise, this protocol's connection is the last one before firing the result Deferred. This is used to make sure the result Deferred is only fired after the connection is cleaned up. """ quietLoss = 0 followRedirect = True failed = 0 _completelyDone = True _specialHeaders = set((b'host', b'user-agent', b'cookie', b'content-length')) def connectionMade(self): method = getattr(self.factory, 'method', b'GET') self.sendCommand(method, self.factory.path) if self.factory.scheme == b'http' and self.factory.port != 80: host = self.factory.host + b':' + intToBytes(self.factory.port) elif self.factory.scheme == b'https' and self.factory.port != 443: host = self.factory.host + b':' + intToBytes(self.factory.port) else: host = self.factory.host self.sendHeader(b'Host', self.factory.headers.get(b"host", host)) self.sendHeader(b'User-Agent', self.factory.agent) data = getattr(self.factory, 'postdata', None) if data is not None: self.sendHeader(b"Content-Length", intToBytes(len(data))) cookieData = [] for (key, value) in self.factory.headers.items(): if key.lower() not in self._specialHeaders: # we calculated it on our own self.sendHeader(key, value) if key.lower() == b'cookie': cookieData.append(value) for cookie, cookval in self.factory.cookies.items(): cookieData.append(cookie + b'=' + cookval) if cookieData: self.sendHeader(b'Cookie', b'; '.join(cookieData)) self.endHeaders() self.headers = {} if data is not None: self.transport.write(data) def handleHeader(self, key, value): """ Called every time a header is received. Stores the header information as key-value pairs in the C{headers} attribute. 
@type key: C{str} @param key: An HTTP header field name. @type value: C{str} @param value: An HTTP header field value. """ key = key.lower() l = self.headers.setdefault(key, []) l.append(value) def handleStatus(self, version, status, message): """ Handle the HTTP status line. @param version: The HTTP version. @type version: L{bytes} @param status: The HTTP status code, an integer represented as a bytestring. @type status: L{bytes} @param message: The HTTP status message. @type message: L{bytes} """ self.version, self.status, self.message = version, status, message self.factory.gotStatus(version, status, message) def handleEndHeaders(self): self.factory.gotHeaders(self.headers) m = getattr(self, 'handleStatus_' + nativeString(self.status), self.handleStatusDefault) m() def handleStatus_200(self): pass handleStatus_201 = lambda self: self.handleStatus_200() handleStatus_202 = lambda self: self.handleStatus_200() def handleStatusDefault(self): self.failed = 1 def handleStatus_301(self): l = self.headers.get(b'location') if not l: self.handleStatusDefault() return url = l[0] if self.followRedirect: self.factory._redirectCount += 1 if self.factory._redirectCount >= self.factory.redirectLimit: err = error.InfiniteRedirection( self.status, b'Infinite redirection detected', location=url) self.factory.noPage(Failure(err)) self.quietLoss = True self.transport.loseConnection() return self._completelyDone = False self.factory.setURL(url) if self.factory.scheme == b'https': from twisted.internet import ssl contextFactory = ssl.ClientContextFactory() reactor.connectSSL(nativeString(self.factory.host), self.factory.port, self.factory, contextFactory) else: reactor.connectTCP(nativeString(self.factory.host), self.factory.port, self.factory) else: self.handleStatusDefault() self.factory.noPage( Failure( error.PageRedirect( self.status, self.message, location = url))) self.quietLoss = True self.transport.loseConnection() def handleStatus_302(self): if self.afterFoundGet: 
self.handleStatus_303() else: self.handleStatus_301() def handleStatus_303(self): self.factory.method = b'GET' self.handleStatus_301() def connectionLost(self, reason): """ When the connection used to issue the HTTP request is closed, notify the factory if we have not already, so it can produce a result. """ if not self.quietLoss: http.HTTPClient.connectionLost(self, reason) self.factory.noPage(reason) if self._completelyDone: # Only if we think we're completely done do we tell the factory that # we're "disconnected". This way when we're following redirects, # only the last protocol used will fire the _disconnectedDeferred. self.factory._disconnectedDeferred.callback(None) def handleResponse(self, response): if self.quietLoss: return if self.failed: self.factory.noPage( Failure( error.Error( self.status, self.message, response))) if self.factory.method == b'HEAD': # Callback with empty string, since there is never a response # body for HEAD requests. self.factory.page(b'') elif self.length != None and self.length != 0: self.factory.noPage(Failure( PartialDownloadError(self.status, self.message, response))) else: self.factory.page(response) # server might be stupid and not close connection. admittedly # the fact we do only one request per connection is also # stupid... self.transport.loseConnection() def timeout(self): self.quietLoss = True self.transport.abortConnection() self.factory.noPage(defer.TimeoutError("Getting %s took longer than %s seconds." 
% (self.factory.url, self.factory.timeout))) class HTTPPageDownloader(HTTPPageGetter): transmittingPage = 0 def handleStatus_200(self, partialContent=0): HTTPPageGetter.handleStatus_200(self) self.transmittingPage = 1 self.factory.pageStart(partialContent) def handleStatus_206(self): self.handleStatus_200(partialContent=1) def handleResponsePart(self, data): if self.transmittingPage: self.factory.pagePart(data) def handleResponseEnd(self): if self.length: self.transmittingPage = 0 self.factory.noPage( Failure( PartialDownloadError(self.status))) if self.transmittingPage: self.factory.pageEnd() self.transmittingPage = 0 if self.failed: self.factory.noPage( Failure( error.Error( self.status, self.message, None))) self.transport.loseConnection() class HTTPClientFactory(protocol.ClientFactory): """Download a given URL. @type deferred: Deferred @ivar deferred: A Deferred that will fire when the content has been retrieved. Once this is fired, the ivars `status', `version', and `message' will be set. @type status: bytes @ivar status: The status of the response. @type version: bytes @ivar version: The version of the response. @type message: bytes @ivar message: The text message returned with the status. @type response_headers: dict @ivar response_headers: The headers that were specified in the response from the server. @type method: bytes @ivar method: The HTTP method to use in the request. This should be one of OPTIONS, GET, HEAD, POST, PUT, DELETE, TRACE, or CONNECT (case matters). Other values may be specified if the server being contacted supports them. @type redirectLimit: int @ivar redirectLimit: The maximum number of HTTP redirects that can occur before it is assumed that the redirection is endless. 
@type afterFoundGet: C{bool} @ivar afterFoundGet: Deviate from the HTTP 1.1 RFC by handling redirects the same way as most web browsers; if the request method is POST and a 302 status is encountered, the redirect is followed with a GET method @type _redirectCount: int @ivar _redirectCount: The current number of HTTP redirects encountered. @ivar _disconnectedDeferred: A L{Deferred} which only fires after the last connection associated with the request (redirects may cause multiple connections to be required) has closed. The result Deferred will only fire after this Deferred, so that callers can be assured that there are no more event sources in the reactor once they get the result. """ protocol = HTTPPageGetter url = None scheme = None host = b'' port = None path = None def __init__(self, url, method=b'GET', postdata=None, headers=None, agent=b"Twisted PageGetter", timeout=0, cookies=None, followRedirect=True, redirectLimit=20, afterFoundGet=False): self.followRedirect = followRedirect self.redirectLimit = redirectLimit self._redirectCount = 0 self.timeout = timeout self.agent = agent self.afterFoundGet = afterFoundGet if cookies is None: cookies = {} self.cookies = cookies if headers is not None: self.headers = InsensitiveDict(headers) else: self.headers = InsensitiveDict() if postdata is not None: self.headers.setdefault(b'Content-Length', intToBytes(len(postdata))) # just in case a broken http/1.1 decides to keep connection alive self.headers.setdefault(b"connection", b"close") self.postdata = postdata self.method = method self.setURL(url) self.waiting = 1 self._disconnectedDeferred = defer.Deferred() self.deferred = defer.Deferred() # Make sure the first callback on the result Deferred pauses the # callback chain until the request connection is closed. 
self.deferred.addBoth(self._waitForDisconnect) self.response_headers = None def _waitForDisconnect(self, passthrough): """ Chain onto the _disconnectedDeferred, preserving C{passthrough}, so that the result is only available after the associated connection has been closed. """ self._disconnectedDeferred.addCallback(lambda ignored: passthrough) return self._disconnectedDeferred def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.url) def setURL(self, url): self.url = url uri = URI.fromBytes(url) if uri.scheme and uri.host: self.scheme = uri.scheme self.host = uri.host self.port = uri.port self.path = uri.originForm def buildProtocol(self, addr): p = protocol.ClientFactory.buildProtocol(self, addr) p.followRedirect = self.followRedirect p.afterFoundGet = self.afterFoundGet if self.timeout: timeoutCall = reactor.callLater(self.timeout, p.timeout) self.deferred.addBoth(self._cancelTimeout, timeoutCall) return p def _cancelTimeout(self, result, timeoutCall): if timeoutCall.active(): timeoutCall.cancel() return result def gotHeaders(self, headers): """ Parse the response HTTP headers. @param headers: The response HTTP headers. @type headers: L{dict} """ self.response_headers = headers if b'set-cookie' in headers: for cookie in headers[b'set-cookie']: if b'=' in cookie: cookparts = cookie.split(b';') cook = cookparts[0] cook.lstrip() k, v = cook.split(b'=', 1) self.cookies[k.lstrip()] = v.lstrip() def gotStatus(self, version, status, message): """ Set the status of the request on us. @param version: The HTTP version. @type version: L{bytes} @param status: The HTTP status code, an integer represented as a bytestring. @type status: L{bytes} @param message: The HTTP status message. 
@type message: L{bytes} """ self.version, self.status, self.message = version, status, message def page(self, page): if self.waiting: self.waiting = 0 self.deferred.callback(page) def noPage(self, reason): if self.waiting: self.waiting = 0 self.deferred.errback(reason) def clientConnectionFailed(self, _, reason): """ When a connection attempt fails, the request cannot be issued. If no result has yet been provided to the result Deferred, provide the connection failure reason as an error result. """ if self.waiting: self.waiting = 0 # If the connection attempt failed, there is nothing more to # disconnect, so just fire that Deferred now. self._disconnectedDeferred.callback(None) self.deferred.errback(reason) class HTTPDownloader(HTTPClientFactory): """ Download to a file. """ protocol = HTTPPageDownloader value = None _log = Logger() def __init__(self, url, fileOrName, method=b'GET', postdata=None, headers=None, agent=b"Twisted client", supportPartial=False, timeout=0, cookies=None, followRedirect=True, redirectLimit=20, afterFoundGet=False): self.requestedPartial = 0 if isinstance(fileOrName, (str, unicode)): self.fileName = fileOrName self.file = None if supportPartial and os.path.exists(self.fileName): fileLength = os.path.getsize(self.fileName) if fileLength: self.requestedPartial = fileLength if headers == None: headers = {} headers[b"range"] = b"bytes=" + intToBytes(fileLength) + b"-" else: self.file = fileOrName HTTPClientFactory.__init__( self, url, method=method, postdata=postdata, headers=headers, agent=agent, timeout=timeout, cookies=cookies, followRedirect=followRedirect, redirectLimit=redirectLimit, afterFoundGet=afterFoundGet) def gotHeaders(self, headers): HTTPClientFactory.gotHeaders(self, headers) if self.requestedPartial: contentRange = headers.get(b"content-range", None) if not contentRange: # server doesn't support partial requests, oh well self.requestedPartial = 0 return start, end, realLength = http.parseContentRange(contentRange[0]) if start 
!= self.requestedPartial: # server is acting weirdly self.requestedPartial = 0 def openFile(self, partialContent): if partialContent: file = open(self.fileName, 'rb+') file.seek(0, 2) else: file = open(self.fileName, 'wb') return file def pageStart(self, partialContent): """Called on page download start. @param partialContent: tells us if the download is partial download we requested. """ if partialContent and not self.requestedPartial: raise ValueError("we shouldn't get partial content response if we didn't want it!") if self.waiting: try: if not self.file: self.file = self.openFile(partialContent) except IOError: #raise self.deferred.errback(Failure()) def pagePart(self, data): if not self.file: return try: self.file.write(data) except IOError: #raise self.file = None self.deferred.errback(Failure()) def noPage(self, reason): """ Close the storage file and errback the waiting L{Deferred} with the given reason. """ if self.waiting: self.waiting = 0 if self.file: try: self.file.close() except: self._log.failure("Error closing HTTPDownloader file") self.deferred.errback(reason) def pageEnd(self): self.waiting = 0 if not self.file: return try: self.file.close() except IOError: self.deferred.errback(Failure()) return self.deferred.callback(self.value) class URI(object): """ A URI object. @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-21} """ def __init__(self, scheme, netloc, host, port, path, params, query, fragment): """ @type scheme: L{bytes} @param scheme: URI scheme specifier. @type netloc: L{bytes} @param netloc: Network location component. @type host: L{bytes} @param host: Host name. For IPv6 address literals the brackets are stripped. @type port: L{int} @param port: Port number. @type path: L{bytes} @param path: Hierarchical path. @type params: L{bytes} @param params: Parameters for last path segment. @type query: L{bytes} @param query: Query string. @type fragment: L{bytes} @param fragment: Fragment identifier. 
""" self.scheme = scheme self.netloc = netloc self.host = host.strip(b'[]') self.port = port self.path = path self.params = params self.query = query self.fragment = fragment @classmethod def fromBytes(cls, uri, defaultPort=None): """ Parse the given URI into a L{URI}. @type uri: C{bytes} @param uri: URI to parse. @type defaultPort: C{int} or L{None} @param defaultPort: An alternate value to use as the port if the URI does not include one. @rtype: L{URI} @return: Parsed URI instance. """ uri = uri.strip() scheme, netloc, path, params, query, fragment = http.urlparse(uri) if defaultPort is None: if scheme == b'https': defaultPort = 443 else: defaultPort = 80 if b':' in netloc: host, port = netloc.rsplit(b':', 1) try: port = int(port) except ValueError: host, port = netloc, defaultPort else: host, port = netloc, defaultPort return cls(scheme, netloc, host, port, path, params, query, fragment) def toBytes(self): """ Assemble the individual parts of the I{URI} into a fully formed I{URI}. @rtype: C{bytes} @return: A fully formed I{URI}. """ return urlunparse( (self.scheme, self.netloc, self.path, self.params, self.query, self.fragment)) @property def originForm(self): """ The absolute I{URI} path including I{URI} parameters, query string and fragment identifier. @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-21#section-5.3} @return: The absolute path in original form. @rtype: L{bytes} """ # The HTTP bis draft says the origin form should not include the # fragment. path = urlunparse( (b'', b'', self.path, self.params, self.query, b'')) if path == b'': path = b'/' return path def _urljoin(base, url): """ Construct a full ("absolute") URL by combining a "base URL" with another URL. Informally, this uses components of the base URL, in particular the addressing scheme, the network location and (part of) the path, to provide missing components in the relative URL. Additionally, the fragment identifier is preserved according to the HTTP 1.1 bis draft. 
@type base: C{bytes} @param base: Base URL. @type url: C{bytes} @param url: URL to combine with C{base}. @return: An absolute URL resulting from the combination of C{base} and C{url}. @see: L{urlparse.urljoin} @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-22#section-7.1.2} """ base, baseFrag = urldefrag(base) url, urlFrag = urldefrag(urljoin(base, url)) return urljoin(url, b'#' + (urlFrag or baseFrag)) def _makeGetterFactory(url, factoryFactory, contextFactory=None, *args, **kwargs): """ Create and connect an HTTP page getting factory. Any additional positional or keyword arguments are used when calling C{factoryFactory}. @param factoryFactory: Factory factory that is called with C{url}, C{args} and C{kwargs} to produce the getter @param contextFactory: Context factory to use when creating a secure connection, defaulting to L{None} @return: The factory created by C{factoryFactory} """ uri = URI.fromBytes(url) factory = factoryFactory(url, *args, **kwargs) if uri.scheme == b'https': from twisted.internet import ssl if contextFactory is None: contextFactory = ssl.ClientContextFactory() reactor.connectSSL( nativeString(uri.host), uri.port, factory, contextFactory) else: reactor.connectTCP(nativeString(uri.host), uri.port, factory) return factory _GETPAGE_REPLACEMENT_TEXT = "https://pypi.org/project/treq/ or twisted.web.client.Agent" def _deprecateGetPageClasses(): """ Mark the protocols and factories associated with L{getPage} and L{downloadPage} as deprecated. """ for klass in [ HTTPPageGetter, HTTPPageDownloader, HTTPClientFactory, HTTPDownloader ]: deprecatedModuleAttribute( Version("Twisted", 16, 7, 0), getDeprecationWarningString( klass, Version("Twisted", 16, 7, 0), replacement=_GETPAGE_REPLACEMENT_TEXT) .split("; ")[1], klass.__module__, klass.__name__) _deprecateGetPageClasses() @deprecated(Version("Twisted", 16, 7, 0), _GETPAGE_REPLACEMENT_TEXT) def getPage(url, contextFactory=None, *args, **kwargs): """ Download a web page as a string. 
Download a page. Return a deferred, which will callback with a page (as a string) or errback with a description of the error. See L{HTTPClientFactory} to see what extra arguments can be passed. """ return _makeGetterFactory( url, HTTPClientFactory, contextFactory=contextFactory, *args, **kwargs).deferred @deprecated(Version("Twisted", 16, 7, 0), _GETPAGE_REPLACEMENT_TEXT) def downloadPage(url, file, contextFactory=None, *args, **kwargs): """ Download a web page to a file. @param file: path to file on filesystem, or file-like object. See HTTPDownloader to see what extra args can be passed. """ factoryFactory = lambda url, *a, **kw: HTTPDownloader(url, file, *a, **kw) return _makeGetterFactory( url, factoryFactory, contextFactory=contextFactory, *args, **kwargs).deferred # The code which follows is based on the new HTTP client implementation. It # should be significantly better than anything above, though it is not yet # feature equivalent. from twisted.web.error import SchemeNotSupported from twisted.web._newclient import ( HTTP11ClientProtocol, PotentialDataLoss, Request, RequestGenerationFailed, RequestNotSent, RequestTransmissionFailed, Response, ResponseDone, ResponseFailed, ResponseNeverReceived, _WrapperException, ) try: from OpenSSL import SSL except ImportError: SSL = None else: from twisted.internet.ssl import (CertificateOptions, platformTrust, optionsForClientTLS) def _requireSSL(decoratee): """ The decorated method requires pyOpenSSL to be present, or it raises L{NotImplementedError}. @param decoratee: A function which requires pyOpenSSL. @type decoratee: L{callable} @return: A function which raises L{NotImplementedError} if pyOpenSSL is not installed; otherwise, if it is installed, simply return C{decoratee}. @rtype: L{callable} """ if SSL is None: @wraps(decoratee) def raiseNotImplemented(*a, **kw): """ pyOpenSSL is not available. @param a: The positional arguments for C{decoratee}. @param kw: The keyword arguments for C{decoratee}. 
@raise NotImplementedError: Always. """ raise NotImplementedError("SSL support unavailable") return raiseNotImplemented return decoratee class WebClientContextFactory(object): """ This class is deprecated. Please simply use L{Agent} as-is, or if you want to customize something, use L{BrowserLikePolicyForHTTPS}. A L{WebClientContextFactory} is an HTTPS policy which totally ignores the hostname and port. It performs basic certificate verification, however the lack of validation of service identity (e.g. hostname validation) means it is still vulnerable to man-in-the-middle attacks. Don't use it any more. """ def _getCertificateOptions(self, hostname, port): """ Return a L{CertificateOptions}. @param hostname: ignored @param port: ignored @return: A new CertificateOptions instance. @rtype: L{CertificateOptions} """ return CertificateOptions( method=SSL.SSLv23_METHOD, trustRoot=platformTrust() ) @_requireSSL def getContext(self, hostname, port): """ Return an L{OpenSSL.SSL.Context}. @param hostname: ignored @param port: ignored @return: A new SSL context. @rtype: L{OpenSSL.SSL.Context} """ return self._getCertificateOptions(hostname, port).getContext() @implementer(IPolicyForHTTPS) class BrowserLikePolicyForHTTPS(object): """ SSL connection creator for web clients. """ def __init__(self, trustRoot=None): self._trustRoot = trustRoot @_requireSSL def creatorForNetloc(self, hostname, port): """ Create a L{client connection creator <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} for a given network location. @param tls: The TLS protocol to create a connection for. @type tls: L{twisted.protocols.tls.TLSMemoryBIOProtocol} @param hostname: The hostname part of the URI. @type hostname: L{bytes} @param port: The port part of the URI. 
@implementer(IPolicyForHTTPS)
class HostnameCachingHTTPSPolicy(object):
    """
    IPolicyForHTTPS that wraps a L{IPolicyForHTTPS} and caches the created
    L{IOpenSSLClientConnectionCreator}.

    This policy will cache up to C{cacheSize}
    L{client connection creators <twisted.internet.interfaces.
    IOpenSSLClientConnectionCreator>} for reuse in subsequent requests to the
    same hostname.

    @ivar _policyForHTTPS: See C{policyforHTTPS} parameter of L{__init__}.

    @ivar _cache: A cache associating hostnames to their
        L{client connection creators <twisted.internet.interfaces.
        IOpenSSLClientConnectionCreator>}.
    @type _cache: L{collections.OrderedDict}

    @ivar _cacheSize: See C{cacheSize} parameter of L{__init__}.

    @since: Twisted 19.2.0
    """

    def __init__(self, policyforHTTPS, cacheSize=20):
        """
        @param policyforHTTPS: The IPolicyForHTTPS to wrap.
        @type policyforHTTPS: L{IPolicyForHTTPS}

        @param cacheSize: The maximum size of the hostname cache.
        @type cacheSize: L{int}
        """
        self._policyForHTTPS = policyforHTTPS
        self._cache = collections.OrderedDict()
        self._cacheSize = cacheSize

    def creatorForNetloc(self, hostname, port):
        """
        Create a L{client connection creator
        <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} for a
        given network location and cache it for future use.

        @param hostname: The hostname part of the URI.
        @type hostname: L{bytes}

        @param port: The port part of the URI.
        @type port: L{int}

        @return: a connection creator with appropriate verification
            restrictions set
        @rtype: L{client connection creator
            <twisted.internet.interfaces.IOpenSSLClientConnectionCreator>}
        """
        cacheKey = hostname.decode("ascii")
        if cacheKey in self._cache:
            # Remove and re-insert so this entry becomes most-recently-used.
            creator = self._cache.pop(cacheKey)
        else:
            creator = self._policyForHTTPS.creatorForNetloc(hostname, port)
        self._cache[cacheKey] = creator

        # Evict least-recently-used entries beyond the size limit.
        while len(self._cache) > self._cacheSize:
            self._cache.popitem(last=False)

        return creator
@implementer(IBodyProducer)
class FileBodyProducer(object):
    """
    L{FileBodyProducer} produces bytes from an input file object incrementally
    and writes them to a consumer.

    Since file-like objects cannot be read from in an event-driven manner,
    L{FileBodyProducer} uses a L{Cooperator} instance to schedule reads from
    the file.  This process is also paused and resumed based on notifications
    from the L{IConsumer} provider being written to.

    The file is closed after it has been read, or if the producer is stopped
    early.

    @ivar _inputFile: Any file-like object, bytes read from which will be
        written to a consumer.

    @ivar _cooperate: A method like L{Cooperator.cooperate} which is used to
        schedule all reads.

    @ivar _readSize: The number of bytes to read from C{_inputFile} at a
        time.
    """

    def __init__(self, inputFile, cooperator=task, readSize=2 ** 16):
        self._inputFile = inputFile
        self._cooperate = cooperator.cooperate
        self._readSize = readSize
        self.length = self._determineLength(inputFile)

    def _determineLength(self, fObj):
        """
        Determine how many bytes can be read out of C{fObj} (assuming it is
        not modified from this point on).  If the determination cannot be
        made, return C{UNKNOWN_LENGTH}.
        """
        seek = getattr(fObj, 'seek', None)
        tell = getattr(fObj, 'tell', None)
        if seek is None or tell is None:
            # Not seekable, so the length cannot be known up front.
            return UNKNOWN_LENGTH
        originalPosition = tell()
        seek(0, os.SEEK_END)
        end = tell()
        seek(originalPosition, os.SEEK_SET)
        return end - originalPosition

    def stopProducing(self):
        """
        Permanently stop writing bytes from the file to the consumer by
        stopping the underlying L{CooperativeTask}.
        """
        self._inputFile.close()
        self._task.stop()

    def startProducing(self, consumer):
        """
        Start a cooperative task which will read bytes from the input file and
        write them to C{consumer}.  Return a L{Deferred} which fires after all
        bytes have been written.

        @param consumer: Any L{IConsumer} provider
        """
        self._task = self._cooperate(self._writeloop(consumer))

        def maybeStopped(reason):
            # IBodyProducer.startProducing's Deferred isn't supposed to fire
            # if stopProducing is called.
            reason.trap(task.TaskStopped)
            return defer.Deferred()

        whenDone = self._task.whenDone()
        whenDone.addCallbacks(lambda ignored: None, maybeStopped)
        return whenDone

    def _writeloop(self, consumer):
        """
        Return an iterator which reads one chunk of bytes from the input file
        and writes them to the consumer for each time it is iterated.
        """
        while True:
            chunk = self._inputFile.read(self._readSize)
            if not chunk:
                self._inputFile.close()
                return
            consumer.write(chunk)
            yield None

    def pauseProducing(self):
        """
        Temporarily suspend copying bytes from the input file to the consumer
        by pausing the L{CooperativeTask} which drives that activity.
        """
        self._task.pause()

    def resumeProducing(self):
        """
        Undo the effects of a previous C{pauseProducing} and resume copying
        bytes to the consumer by resuming the L{CooperativeTask} which drives
        the write activity.
        """
        self._task.resume()
@since: 11.1 """ def __init__(self, quiescentCallback, metadata): self._quiescentCallback = quiescentCallback self._metadata = metadata def __repr__(self): return '_HTTP11ClientFactory({}, {})'.format( self._quiescentCallback, self._metadata) def buildProtocol(self, addr): return HTTP11ClientProtocol(self._quiescentCallback) class _RetryingHTTP11ClientProtocol(object): """ A wrapper for L{HTTP11ClientProtocol} that automatically retries requests. @ivar _clientProtocol: The underlying L{HTTP11ClientProtocol}. @ivar _newConnection: A callable that creates a new connection for a retry. """ def __init__(self, clientProtocol, newConnection): self._clientProtocol = clientProtocol self._newConnection = newConnection def _shouldRetry(self, method, exception, bodyProducer): """ Indicate whether request should be retried. Only returns C{True} if method is idempotent, no response was received, the reason for the failed request was not due to user-requested cancellation, and no body was sent. The latter requirement may be relaxed in the future, and PUT added to approved method list. @param method: The method of the request. @type method: L{bytes} """ if method not in (b"GET", b"HEAD", b"OPTIONS", b"DELETE", b"TRACE"): return False if not isinstance(exception, (RequestNotSent, RequestTransmissionFailed, ResponseNeverReceived)): return False if isinstance(exception, _WrapperException): for aFailure in exception.reasons: if aFailure.check(defer.CancelledError): return False if bodyProducer is not None: return False return True def request(self, request): """ Do a request, and retry once (with a new connection) if it fails in a retryable manner. @param request: A L{Request} instance that will be requested using the wrapped protocol. 
""" d = self._clientProtocol.request(request) def failed(reason): if self._shouldRetry(request.method, reason.value, request.bodyProducer): return self._newConnection().addCallback( lambda connection: connection.request(request)) else: return reason d.addErrback(failed) return d class HTTPConnectionPool(object): """ A pool of persistent HTTP connections. Features: - Cached connections will eventually time out. - Limits on maximum number of persistent connections. Connections are stored using keys, which should be chosen such that any connections stored under a given key can be used interchangeably. Failed requests done using previously cached connections will be retried once if they use an idempotent method (e.g. GET), in case the HTTP server timed them out. @ivar persistent: Boolean indicating whether connections should be persistent. Connections are persistent by default. @ivar maxPersistentPerHost: The maximum number of cached persistent connections for a C{host:port} destination. @type maxPersistentPerHost: C{int} @ivar cachedConnectionTimeout: Number of seconds a cached persistent connection will stay open before disconnecting. @ivar retryAutomatically: C{boolean} indicating whether idempotent requests should be retried once if no response was received. @ivar _factory: The factory used to connect to the proxy. @ivar _connections: Map (scheme, host, port) to lists of L{HTTP11ClientProtocol} instances. @ivar _timeouts: Map L{HTTP11ClientProtocol} instances to a C{IDelayedCall} instance of their timeout. @since: 12.1 """ _factory = _HTTP11ClientFactory maxPersistentPerHost = 2 cachedConnectionTimeout = 240 retryAutomatically = True _log = Logger() def __init__(self, reactor, persistent=True): self._reactor = reactor self.persistent = persistent self._connections = {} self._timeouts = {} def getConnection(self, key, endpoint): """ Supply a connection, newly created or retrieved from the pool, to be used for one HTTP request. 
The connection will remain out of the pool (not available to be returned from future calls to this method) until one HTTP request has been completed over it. Afterwards, if the connection is still open, it will automatically be added to the pool. @param key: A unique key identifying connections that can be used interchangeably. @param endpoint: An endpoint that can be used to open a new connection if no cached connection is available. @return: A C{Deferred} that will fire with a L{HTTP11ClientProtocol} (or a wrapper) that can be used to send a single HTTP request. """ # Try to get cached version: connections = self._connections.get(key) while connections: connection = connections.pop(0) # Cancel timeout: self._timeouts[connection].cancel() del self._timeouts[connection] if connection.state == "QUIESCENT": if self.retryAutomatically: newConnection = lambda: self._newConnection(key, endpoint) connection = _RetryingHTTP11ClientProtocol( connection, newConnection) return defer.succeed(connection) return self._newConnection(key, endpoint) def _newConnection(self, key, endpoint): """ Create a new connection. This implements the new connection code path for L{getConnection}. """ def quiescentCallback(protocol): self._putConnection(key, protocol) factory = self._factory(quiescentCallback, repr(endpoint)) return endpoint.connect(factory) def _removeConnection(self, key, connection): """ Remove a connection from the cache and disconnect it. """ connection.transport.loseConnection() self._connections[key].remove(connection) del self._timeouts[connection] def _putConnection(self, key, connection): """ Return a persistent connection to the pool. This will be called by L{HTTP11ClientProtocol} when the connection becomes quiescent. 
""" if connection.state != "QUIESCENT": # Log with traceback for debugging purposes: try: raise RuntimeError( "BUG: Non-quiescent protocol added to connection pool.") except: self._log.failure( "BUG: Non-quiescent protocol added to connection pool.") return connections = self._connections.setdefault(key, []) if len(connections) == self.maxPersistentPerHost: dropped = connections.pop(0) dropped.transport.loseConnection() self._timeouts[dropped].cancel() del self._timeouts[dropped] connections.append(connection) cid = self._reactor.callLater(self.cachedConnectionTimeout, self._removeConnection, key, connection) self._timeouts[connection] = cid def closeCachedConnections(self): """ Close all persistent connections and remove them from the pool. @return: L{defer.Deferred} that fires when all connections have been closed. """ results = [] for protocols in itervalues(self._connections): for p in protocols: results.append(p.abort()) self._connections = {} for dc in itervalues(self._timeouts): dc.cancel() self._timeouts = {} return defer.gatherResults(results).addCallback(lambda ign: None) class _AgentBase(object): """ Base class offering common facilities for L{Agent}-type classes. @ivar _reactor: The C{IReactorTime} implementation which will be used by the pool, and perhaps by subclasses as well. @ivar _pool: The L{HTTPConnectionPool} used to manage HTTP connections. """ def __init__(self, reactor, pool): if pool is None: pool = HTTPConnectionPool(reactor, False) self._reactor = reactor self._pool = pool def _computeHostValue(self, scheme, host, port): """ Compute the string to use for the value of the I{Host} header, based on the given scheme, host name, and port number. 
""" if (isIPv6Address(nativeString(host))): host = b'[' + host + b']' if (scheme, port) in ((b'http', 80), (b'https', 443)): return host return host + b":" + intToBytes(port) def _requestWithEndpoint(self, key, endpoint, method, parsedURI, headers, bodyProducer, requestPath): """ Issue a new request, given the endpoint and the path sent as part of the request. """ if not isinstance(method, bytes): raise TypeError('method={!r} is {}, but must be bytes'.format( method, type(method))) # Create minimal headers, if necessary: if headers is None: headers = Headers() if not headers.hasHeader(b'host'): headers = headers.copy() headers.addRawHeader( b'host', self._computeHostValue(parsedURI.scheme, parsedURI.host, parsedURI.port)) d = self._pool.getConnection(key, endpoint) def cbConnected(proto): return proto.request( Request._construct(method, requestPath, headers, bodyProducer, persistent=self._pool.persistent, parsedURI=parsedURI)) d.addCallback(cbConnected) return d @implementer(IAgentEndpointFactory) class _StandardEndpointFactory(object): """ Standard HTTP endpoint destinations - TCP for HTTP, TCP+TLS for HTTPS. @ivar _policyForHTTPS: A web context factory which will be used to create SSL context objects for any SSL connections the agent needs to make. @ivar _connectTimeout: If not L{None}, the timeout passed to L{HostnameEndpoint} for specifying the connection timeout. @ivar _bindAddress: If not L{None}, the address passed to L{HostnameEndpoint} for specifying the local address to bind to. """ def __init__(self, reactor, contextFactory, connectTimeout, bindAddress): """ @param reactor: A provider to use to create endpoints. @type reactor: see L{HostnameEndpoint.__init__} for acceptable reactor types. @param contextFactory: A factory for TLS contexts, to control the verification parameters of OpenSSL. @type contextFactory: L{IPolicyForHTTPS}. @param connectTimeout: The amount of time that this L{Agent} will wait for the peer to accept a connection. 
@implementer(IAgent)
class Agent(_AgentBase):
    """
    L{Agent} is a very basic HTTP client.  It supports I{HTTP} and I{HTTPS}
    scheme URIs.

    @ivar _pool: An L{HTTPConnectionPool} instance.

    @ivar _endpointFactory: The L{IAgentEndpointFactory} which will
        be used to create endpoints for outgoing connections.

    @since: 9.0
    """

    def __init__(self, reactor,
                 contextFactory=BrowserLikePolicyForHTTPS(),
                 connectTimeout=None, bindAddress=None,
                 pool=None):
        """
        Create an L{Agent}.

        @param reactor: A reactor for this L{Agent} to place outgoing
            connections.
        @type reactor: see L{HostnameEndpoint.__init__} for acceptable
            reactor types.

        @param contextFactory: A factory for TLS contexts, to control the
            verification parameters of OpenSSL.  The default is to use a
            L{BrowserLikePolicyForHTTPS}, so unless you have special
            requirements you can leave this as-is.
        @type contextFactory: L{IPolicyForHTTPS}.

        @param connectTimeout: The amount of time that this L{Agent} will
            wait for the peer to accept a connection.
        @type connectTimeout: L{float}

        @param bindAddress: The local address for client sockets to bind to.
        @type bindAddress: L{bytes}

        @param pool: An L{HTTPConnectionPool} instance, or L{None}, in which
            case a non-persistent L{HTTPConnectionPool} instance will be
            created.
        @type pool: L{HTTPConnectionPool}
        """
        if not IPolicyForHTTPS.providedBy(contextFactory):
            warnings.warn(
                repr(contextFactory) +
                " was passed as the HTTPS policy for an Agent, but it does "
                "not provide IPolicyForHTTPS. Since Twisted 14.0, you must "
                "pass a provider of IPolicyForHTTPS.",
                stacklevel=2, category=DeprecationWarning
            )
            contextFactory = _DeprecatedToCurrentPolicyForHTTPS(contextFactory)
        endpointFactory = _StandardEndpointFactory(
            reactor, contextFactory, connectTimeout, bindAddress)
        self._init(reactor, endpointFactory, pool)

    @classmethod
    def usingEndpointFactory(cls, reactor, endpointFactory, pool=None):
        """
        Create a new L{Agent} that will use the endpoint factory to figure
        out how to connect to the server.

        @param reactor: A reactor for this L{Agent} to place outgoing
            connections.
        @type reactor: see L{HostnameEndpoint.__init__} for acceptable
            reactor types.

        @param endpointFactory: Used to construct endpoints which the
            HTTP client will connect with.
        @type endpointFactory: an L{IAgentEndpointFactory} provider.

        @param pool: An L{HTTPConnectionPool} instance, or L{None}, in which
            case a non-persistent L{HTTPConnectionPool} instance will be
            created.
        @type pool: L{HTTPConnectionPool}

        @return: A new L{Agent}.
        """
        agent = cls.__new__(cls)
        agent._init(reactor, endpointFactory, pool)
        return agent

    def _init(self, reactor, endpointFactory, pool):
        """
        Initialize a new L{Agent}.

        @param reactor: A reactor for this L{Agent} to place outgoing
            connections.
        @type reactor: see L{HostnameEndpoint.__init__} for acceptable
            reactor types.

        @param endpointFactory: Used to construct endpoints which the
            HTTP client will connect with.
        @type endpointFactory: an L{IAgentEndpointFactory} provider.

        @param pool: An L{HTTPConnectionPool} instance, or L{None}, in which
            case a non-persistent L{HTTPConnectionPool} instance will be
            created.
        @type pool: L{HTTPConnectionPool}
        """
        _AgentBase.__init__(self, reactor, pool)
        self._endpointFactory = endpointFactory

    def _getEndpoint(self, uri):
        """
        Get an endpoint for the given URI, using C{self._endpointFactory}.

        @param uri: The URI of the request.
        @type uri: L{URI}

        @return: An endpoint which can be used to connect to given address.
        """
        return self._endpointFactory.endpointForURI(uri)

    @staticmethod
    def _checkURI(uri):
        """
        Reject URIs containing characters outside the printable ASCII range.

        Control characters (notably CR and LF) would otherwise be copied
        verbatim onto the request line, allowing header injection
        (CVE-2019-12387).

        @param uri: The URI to validate.
        @type uri: L{bytes}

        @raise ValueError: If C{uri} is empty or contains a byte outside
            C{0x21}-C{0x7e}.
        """
        import re  # Local import keeps this security fix self-contained.
        if not re.match(br'\A[\x21-\x7e]+\Z', uri):
            raise ValueError('Invalid URI {!r}'.format(uri))

    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Issue a request to the server indicated by the given C{uri}.

        An existing connection from the connection pool may be used or a new
        one may be created.

        I{HTTP} and I{HTTPS} schemes are supported in C{uri}.

        @raise ValueError: If C{uri} contains non-printable or non-ASCII
            characters.

        @see: L{twisted.web.iweb.IAgent.request}
        """
        uri = uri.strip()
        self._checkURI(uri)
        parsedURI = URI.fromBytes(uri)
        try:
            endpoint = self._getEndpoint(parsedURI)
        except SchemeNotSupported:
            return defer.fail(Failure())
        key = (parsedURI.scheme, parsedURI.host, parsedURI.port)
        return self._requestWithEndpoint(key, endpoint, method, parsedURI,
                                         headers, bodyProducer,
                                         parsedURI.originForm)
""" # Cache *all* connections under the same key, since we are only # connecting to a single destination, the proxy: key = ("http-proxy", self._proxyEndpoint) # To support proxying HTTPS via CONNECT, we will use key # ("http-proxy-CONNECT", scheme, host, port), and an endpoint that # wraps _proxyEndpoint with an additional callback to do the CONNECT. return self._requestWithEndpoint(key, self._proxyEndpoint, method, URI.fromBytes(uri), headers, bodyProducer, uri) class _FakeUrllib2Request(object): """ A fake C{urllib2.Request} object for C{cookielib} to work with. @see: U{http://docs.python.org/library/urllib2.html#request-objects} @type uri: native L{str} @ivar uri: Request URI. @type headers: L{twisted.web.http_headers.Headers} @ivar headers: Request headers. @type type: native L{str} @ivar type: The scheme of the URI. @type host: native L{str} @ivar host: The host[:port] of the URI. @since: 11.1 """ def __init__(self, uri): """ Create a fake Urllib2 request. @param uri: Request URI. @type uri: L{bytes} """ self.uri = nativeString(uri) self.headers = Headers() _uri = URI.fromBytes(uri) self.type = nativeString(_uri.scheme) self.host = nativeString(_uri.host) if (_uri.scheme, _uri.port) not in ((b'http', 80), (b'https', 443)): # If it's not a schema on the regular port, add the port. 
self.host += ":" + str(_uri.port) if _PY3: self.origin_req_host = nativeString(_uri.host) self.unverifiable = lambda _: False def has_header(self, header): return self.headers.hasHeader(networkString(header)) def add_unredirected_header(self, name, value): self.headers.addRawHeader(networkString(name), networkString(value)) def get_full_url(self): return self.uri def get_header(self, name, default=None): headers = self.headers.getRawHeaders(networkString(name), default) if headers is not None: headers = [nativeString(x) for x in headers] return headers[0] return None def get_host(self): return self.host def get_type(self): return self.type def is_unverifiable(self): # In theory this shouldn't be hardcoded. return False class _FakeUrllib2Response(object): """ A fake C{urllib2.Response} object for C{cookielib} to work with. @type response: C{twisted.web.iweb.IResponse} @ivar response: Underlying Twisted Web response. @since: 11.1 """ def __init__(self, response): self.response = response def info(self): class _Meta(object): def getheaders(zelf, name): # PY2 headers = self.response.headers.getRawHeaders(name, []) return headers def get_all(zelf, name, default): # PY3 headers = self.response.headers.getRawHeaders( networkString(name), default) h = [nativeString(x) for x in headers] return h return _Meta() @implementer(IAgent) class CookieAgent(object): """ L{CookieAgent} extends the basic L{Agent} to add RFC-compliant handling of HTTP cookies. Cookies are written to and extracted from a C{cookielib.CookieJar} instance. The same cookie jar instance will be used for any requests through this agent, mutating it whenever a I{Set-Cookie} header appears in a response. @type _agent: L{twisted.web.client.Agent} @ivar _agent: Underlying Twisted Web agent to issue requests through. @type cookieJar: C{cookielib.CookieJar} @ivar cookieJar: Initialized cookie jar to read cookies from and store cookies to. 
@since: 11.1 """ def __init__(self, agent, cookieJar): self._agent = agent self.cookieJar = cookieJar def request(self, method, uri, headers=None, bodyProducer=None): """ Issue a new request to the wrapped L{Agent}. Send a I{Cookie} header if a cookie for C{uri} is stored in L{CookieAgent.cookieJar}. Cookies are automatically extracted and stored from requests. If a C{'cookie'} header appears in C{headers} it will override the automatic cookie header obtained from the cookie jar. @see: L{Agent.request} """ if headers is None: headers = Headers() lastRequest = _FakeUrllib2Request(uri) # Setting a cookie header explicitly will disable automatic request # cookies. if not headers.hasHeader(b'cookie'): self.cookieJar.add_cookie_header(lastRequest) cookieHeader = lastRequest.get_header('Cookie', None) if cookieHeader is not None: headers = headers.copy() headers.addRawHeader(b'cookie', networkString(cookieHeader)) d = self._agent.request(method, uri, headers, bodyProducer) d.addCallback(self._extractCookies, lastRequest) return d def _extractCookies(self, response, request): """ Extract response cookies and store them in the cookie jar. @type response: L{twisted.web.iweb.IResponse} @param response: Twisted Web response. @param request: A urllib2 compatible request object. """ resp = _FakeUrllib2Response(response) self.cookieJar.extract_cookies(resp, request) return response class GzipDecoder(proxyForInterface(IResponse)): """ A wrapper for a L{Response} instance which handles gzip'ed body. @ivar original: The original L{Response} object. @since: 11.1 """ def __init__(self, response): self.original = response self.length = UNKNOWN_LENGTH def deliverBody(self, protocol): """ Override C{deliverBody} to wrap the given C{protocol} with L{_GzipProtocol}. """ self.original.deliverBody(_GzipProtocol(protocol, self.original)) class _GzipProtocol(proxyForInterface(IProtocol)): """ A L{Protocol} implementation which wraps another one, transparently decompressing received data. 
@ivar _zlibDecompress: A zlib decompress object used to decompress the data stream. @ivar _response: A reference to the original response, in case of errors. @since: 11.1 """ def __init__(self, protocol, response): self.original = protocol self._response = response self._zlibDecompress = zlib.decompressobj(16 + zlib.MAX_WBITS) def dataReceived(self, data): """ Decompress C{data} with the zlib decompressor, forwarding the raw data to the original protocol. """ try: rawData = self._zlibDecompress.decompress(data) except zlib.error: raise ResponseFailed([Failure()], self._response) if rawData: self.original.dataReceived(rawData) def connectionLost(self, reason): """ Forward the connection lost event, flushing remaining data from the decompressor if any. """ try: rawData = self._zlibDecompress.flush() except zlib.error: raise ResponseFailed([reason, Failure()], self._response) if rawData: self.original.dataReceived(rawData) self.original.connectionLost(reason) @implementer(IAgent) class ContentDecoderAgent(object): """ An L{Agent} wrapper to handle encoded content. It takes care of declaring the support for content in the I{Accept-Encoding} header, and automatically decompresses the received data if it's effectively using compression. @param decoders: A list or tuple of (name, decoder) objects. The name declares which decoding the decoder supports, and the decoder must return a response object when called/instantiated. For example, C{(('gzip', GzipDecoder))}. The order determines how the decoders are going to be advertized to the server. @since: 11.1 """ def __init__(self, agent, decoders): self._agent = agent self._decoders = dict(decoders) self._supported = b','.join([decoder[0] for decoder in decoders]) def request(self, method, uri, headers=None, bodyProducer=None): """ Send a client request which declares supporting compressed content. @see: L{Agent.request}. 
""" if headers is None: headers = Headers() else: headers = headers.copy() headers.addRawHeader(b'accept-encoding', self._supported) deferred = self._agent.request(method, uri, headers, bodyProducer) return deferred.addCallback(self._handleResponse) def _handleResponse(self, response): """ Check if the response is encoded, and wrap it to handle decompression. """ contentEncodingHeaders = response.headers.getRawHeaders( b'content-encoding', []) contentEncodingHeaders = b','.join(contentEncodingHeaders).split(b',') while contentEncodingHeaders: name = contentEncodingHeaders.pop().strip() decoder = self._decoders.get(name) if decoder is not None: response = decoder(response) else: # Add it back contentEncodingHeaders.append(name) break if contentEncodingHeaders: response.headers.setRawHeaders( b'content-encoding', [b','.join(contentEncodingHeaders)]) else: response.headers.removeHeader(b'content-encoding') return response @implementer(IAgent) class RedirectAgent(object): """ An L{Agent} wrapper which handles HTTP redirects. The implementation is rather strict: 301 and 302 behaves like 307, not redirecting automatically on methods different from I{GET} and I{HEAD}. See L{BrowserLikeRedirectAgent} for a redirecting Agent that behaves more like a web browser. @param redirectLimit: The maximum number of times the agent is allowed to follow redirects before failing with a L{error.InfiniteRedirection}. @cvar _redirectResponses: A L{list} of HTTP status codes to be redirected for I{GET} and I{HEAD} methods. @cvar _seeOtherResponses: A L{list} of HTTP status codes to be redirected for any method and the method altered to I{GET}. 
@since: 11.1 """ _redirectResponses = [http.MOVED_PERMANENTLY, http.FOUND, http.TEMPORARY_REDIRECT] _seeOtherResponses = [http.SEE_OTHER] def __init__(self, agent, redirectLimit=20): self._agent = agent self._redirectLimit = redirectLimit def request(self, method, uri, headers=None, bodyProducer=None): """ Send a client request following HTTP redirects. @see: L{Agent.request}. """ deferred = self._agent.request(method, uri, headers, bodyProducer) return deferred.addCallback( self._handleResponse, method, uri, headers, 0) def _resolveLocation(self, requestURI, location): """ Resolve the redirect location against the request I{URI}. @type requestURI: C{bytes} @param requestURI: The request I{URI}. @type location: C{bytes} @param location: The redirect location. @rtype: C{bytes} @return: Final resolved I{URI}. """ return _urljoin(requestURI, location) def _handleRedirect(self, response, method, uri, headers, redirectCount): """ Handle a redirect response, checking the number of redirects already followed, and extracting the location header fields. """ if redirectCount >= self._redirectLimit: err = error.InfiniteRedirection( response.code, b'Infinite redirection detected', location=uri) raise ResponseFailed([Failure(err)], response) locationHeaders = response.headers.getRawHeaders(b'location', []) if not locationHeaders: err = error.RedirectWithNoLocation( response.code, b'No location header field', uri) raise ResponseFailed([Failure(err)], response) location = self._resolveLocation(uri, locationHeaders[0]) deferred = self._agent.request(method, location, headers) def _chainResponse(newResponse): newResponse.setPreviousResponse(response) return newResponse deferred.addCallback(_chainResponse) return deferred.addCallback( self._handleResponse, method, uri, headers, redirectCount + 1) def _handleResponse(self, response, method, uri, headers, redirectCount): """ Handle the response, making another request if it indicates a redirect. 
""" if response.code in self._redirectResponses: if method not in (b'GET', b'HEAD'): err = error.PageRedirect(response.code, location=uri) raise ResponseFailed([Failure(err)], response) return self._handleRedirect(response, method, uri, headers, redirectCount) elif response.code in self._seeOtherResponses: return self._handleRedirect(response, b'GET', uri, headers, redirectCount) return response class BrowserLikeRedirectAgent(RedirectAgent): """ An L{Agent} wrapper which handles HTTP redirects in the same fashion as web browsers. Unlike L{RedirectAgent}, the implementation is more relaxed: 301 and 302 behave like 303, redirecting automatically on any method and altering the redirect request to a I{GET}. @see: L{RedirectAgent} @since: 13.1 """ _redirectResponses = [http.TEMPORARY_REDIRECT] _seeOtherResponses = [http.MOVED_PERMANENTLY, http.FOUND, http.SEE_OTHER] class _ReadBodyProtocol(protocol.Protocol): """ Protocol that collects data sent to it. This is a helper for L{IResponse.deliverBody}, which collects the body and fires a deferred with it. @ivar deferred: See L{__init__}. @ivar status: See L{__init__}. @ivar message: See L{__init__}. @ivar dataBuffer: list of byte-strings received @type dataBuffer: L{list} of L{bytes} """ def __init__(self, status, message, deferred): """ @param status: Status of L{IResponse} @ivar status: L{int} @param message: Message of L{IResponse} @type message: L{bytes} @param deferred: deferred to fire when response is complete @type deferred: L{Deferred} firing with L{bytes} """ self.deferred = deferred self.status = status self.message = message self.dataBuffer = [] def dataReceived(self, data): """ Accumulate some more bytes from the response. """ self.dataBuffer.append(data) def connectionLost(self, reason): """ Deliver the accumulated response bytes to the waiting L{Deferred}, if the response body has been completely received without error. 
""" if reason.check(ResponseDone): self.deferred.callback(b''.join(self.dataBuffer)) elif reason.check(PotentialDataLoss): self.deferred.errback( PartialDownloadError(self.status, self.message, b''.join(self.dataBuffer))) else: self.deferred.errback(reason) def readBody(response): """ Get the body of an L{IResponse} and return it as a byte string. This is a helper function for clients that don't want to incrementally receive the body of an HTTP response. @param response: The HTTP response for which the body will be read. @type response: L{IResponse} provider @return: A L{Deferred} which will fire with the body of the response. Cancelling it will close the connection to the server immediately. """ def cancel(deferred): """ Cancel a L{readBody} call, close the connection to the HTTP server immediately, if it is still open. @param deferred: The cancelled L{defer.Deferred}. """ abort = getAbort() if abort is not None: abort() d = defer.Deferred(cancel) protocol = _ReadBodyProtocol(response.code, response.phrase, d) def getAbort(): return getattr(protocol.transport, 'abortConnection', None) response.deliverBody(protocol) if protocol.transport is not None and getAbort() is None: warnings.warn( 'Using readBody with a transport that does not have an ' 'abortConnection method', category=DeprecationWarning, stacklevel=2) return d __all__ = [ 'Agent', 'BrowserLikeRedirectAgent', 'ContentDecoderAgent', 'CookieAgent', 'downloadPage', 'getPage', 'GzipDecoder', 'HTTPClientFactory', 'HTTPConnectionPool', 'HTTPDownloader', 'HTTPPageDownloader', 'HTTPPageGetter', 'PartialDownloadError', 'ProxyAgent', 'readBody', 'RedirectAgent', 'RequestGenerationFailed', 'RequestTransmissionFailed', 'Response', 'ResponseDone', 'ResponseFailed', 'ResponseNeverReceived', 'URI', ]
./CrossVul/dataset_final_sorted/CWE-20/py/bad_872_1
crossvul-python_data_bad_4374_2
""" Systemd service utilities. Contains functions to start, stop & poll systemd services. Probably not very useful outside this spawner. """ import asyncio import shlex async def start_transient_service( unit_name, cmd, args, working_dir, environment_variables=None, properties=None, uid=None, gid=None, slice=None, ): """ Start a systemd transient service with given paramters """ run_cmd = [ 'systemd-run', '--unit', unit_name, ] if properties: for key, value in properties.items(): if isinstance(value, list): run_cmd += ['--property={}={}'.format(key, v) for v in value] else: # A string! run_cmd.append('--property={}={}'.format(key, value)) if environment_variables: run_cmd += [ '--setenv={}={}'.format(key, value) for key, value in environment_variables.items() ] # Explicitly check if uid / gid are not None, since 0 is valid value for both if uid is not None: run_cmd += ['--uid', str(uid)] if gid is not None: run_cmd += ['--gid', str(gid)] if slice is not None: run_cmd += ['--slice={}'.format(slice)] # We unfortunately have to resort to doing cd with bash, since WorkingDirectory property # of systemd units can't be set for transient units via systemd-run until systemd v227. # Centos 7 has systemd 219, and will probably never upgrade - so we need to support them. run_cmd += [ '/bin/bash', '-c', "cd {wd} && exec {cmd} {args}".format( wd=shlex.quote(working_dir), cmd=' '.join([shlex.quote(c) for c in cmd]), args=' '.join([shlex.quote(a) for a in args]) ) ] proc = await asyncio.create_subprocess_exec(*run_cmd) return await proc.wait() async def service_running(unit_name): """ Return true if service with given name is running (active). """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'is-active', unit_name, # hide stdout, but don't capture stderr at all stdout=asyncio.subprocess.DEVNULL ) ret = await proc.wait() return ret == 0 async def service_failed(unit_name): """ Return true if service with given name is in a failed state. 
""" proc = await asyncio.create_subprocess_exec( 'systemctl', 'is-failed', unit_name, # hide stdout, but don't capture stderr at all stdout=asyncio.subprocess.DEVNULL ) ret = await proc.wait() return ret == 0 async def stop_service(unit_name): """ Stop service with given name. Throws CalledProcessError if stopping fails """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'stop', unit_name ) await proc.wait() async def reset_service(unit_name): """ Reset service with given name. Throws CalledProcessError if resetting fails """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'reset-failed', unit_name ) await proc.wait()
./CrossVul/dataset_final_sorted/CWE-668/py/bad_4374_2
crossvul-python_data_good_4374_2
""" Systemd service utilities. Contains functions to start, stop & poll systemd services. Probably not very useful outside this spawner. """ import asyncio import os import re import shlex import warnings # light validation of environment variable keys env_pat = re.compile("[A-Za-z_]+") RUN_ROOT = "/run" def ensure_environment_directory(environment_file_directory): """Ensure directory for environment files exists and is private""" # ensure directory exists os.makedirs(environment_file_directory, mode=0o700, exist_ok=True) # validate permissions mode = os.stat(environment_file_directory).st_mode if mode & 0o077: warnings.warn( f"Fixing permissions on environment directory {environment_file_directory}: {oct(mode)}", RuntimeWarning, ) os.chmod(environment_file_directory, 0o700) else: return # Check again after supposedly fixing. # Some filesystems can have weird issues, preventing this from having desired effect mode = os.stat(environment_file_directory).st_mode if mode & 0o077: warnings.warn( f"Bad permissions on environment directory {environment_file_directory}: {oct(mode)}", RuntimeWarning, ) def make_environment_file(environment_file_directory, unit_name, environment_variables): """Make a systemd environment file - ensures environment directory exists and is private - writes private environment file - returns path to created environment file """ ensure_environment_directory(environment_file_directory) env_file = os.path.join(environment_file_directory, f"{unit_name}.env") env_lines = [] for key, value in sorted(environment_variables.items()): assert env_pat.match(key), f"{key} not a valid environment variable" env_lines.append(f"{key}={shlex.quote(value)}") env_lines.append("") # trailing newline with open(env_file, mode="w") as f: # make the file itself private as well os.fchmod(f.fileno(), 0o400) f.write("\n".join(env_lines)) return env_file async def start_transient_service( unit_name, cmd, args, working_dir, environment_variables=None, properties=None, 
uid=None, gid=None, slice=None, ): """ Start a systemd transient service with given parameters """ run_cmd = [ 'systemd-run', '--unit', unit_name, ] if properties is None: properties = {} else: properties = properties.copy() # ensure there is a runtime directory where we can put our env file # If already set, can be space-separated list of paths runtime_directories = properties.setdefault("RuntimeDirectory", unit_name).split() # runtime directories are always resolved relative to `/run` # grab the first item, if more than one runtime_dir = os.path.join(RUN_ROOT, runtime_directories[0]) # make runtime directories private by default properties.setdefault("RuntimeDirectoryMode", "700") # preserve runtime directories across restarts # allows `systemctl restart` to load the env properties.setdefault("RuntimeDirectoryPreserve", "restart") if properties: for key, value in properties.items(): if isinstance(value, list): run_cmd += ['--property={}={}'.format(key, v) for v in value] else: # A string! run_cmd.append('--property={}={}'.format(key, value)) if environment_variables: environment_file = make_environment_file( runtime_dir, unit_name, environment_variables ) run_cmd.append(f"--property=EnvironmentFile={environment_file}") # Explicitly check if uid / gid are not None, since 0 is valid value for both if uid is not None: run_cmd += ['--uid', str(uid)] if gid is not None: run_cmd += ['--gid', str(gid)] if slice is not None: run_cmd += ['--slice={}'.format(slice)] # We unfortunately have to resort to doing cd with bash, since WorkingDirectory property # of systemd units can't be set for transient units via systemd-run until systemd v227. # Centos 7 has systemd 219, and will probably never upgrade - so we need to support them. 
run_cmd += [ '/bin/bash', '-c', "cd {wd} && exec {cmd} {args}".format( wd=shlex.quote(working_dir), cmd=' '.join([shlex.quote(c) for c in cmd]), args=' '.join([shlex.quote(a) for a in args]) ) ] proc = await asyncio.create_subprocess_exec(*run_cmd) return await proc.wait() async def service_running(unit_name): """ Return true if service with given name is running (active). """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'is-active', unit_name, # hide stdout, but don't capture stderr at all stdout=asyncio.subprocess.DEVNULL ) ret = await proc.wait() return ret == 0 async def service_failed(unit_name): """ Return true if service with given name is in a failed state. """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'is-failed', unit_name, # hide stdout, but don't capture stderr at all stdout=asyncio.subprocess.DEVNULL ) ret = await proc.wait() return ret == 0 async def stop_service(unit_name): """ Stop service with given name. Throws CalledProcessError if stopping fails """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'stop', unit_name ) await proc.wait() async def reset_service(unit_name): """ Reset service with given name. Throws CalledProcessError if resetting fails """ proc = await asyncio.create_subprocess_exec( 'systemctl', 'reset-failed', unit_name ) await proc.wait()
./CrossVul/dataset_final_sorted/CWE-668/py/good_4374_2
crossvul-python_data_bad_4374_1
from setuptools import setup setup( name='jupyterhub-systemdspawner', version='0.14', description='JupyterHub Spawner using systemd for resource isolation', long_description='See https://github.com/jupyterhub/systemdspawner for more info', url='https://github.com/jupyterhub/systemdspawner', author='Yuvi Panda', author_email='yuvipanda@gmail.com', license='3 Clause BSD', packages=['systemdspawner'], entry_points={ 'jupyterhub.spawners': [ 'systemdspawner = systemdspawner:SystemdSpawner', ], }, install_requires=[ 'jupyterhub>=0.9', 'tornado>=5.0' ], )
./CrossVul/dataset_final_sorted/CWE-668/py/bad_4374_1
crossvul-python_data_good_4374_1
from setuptools import setup setup( name='jupyterhub-systemdspawner', version='0.15.0', description='JupyterHub Spawner using systemd for resource isolation', long_description='See https://github.com/jupyterhub/systemdspawner for more info', url='https://github.com/jupyterhub/systemdspawner', author='Yuvi Panda', author_email='yuvipanda@gmail.com', license='3 Clause BSD', packages=['systemdspawner'], entry_points={ 'jupyterhub.spawners': [ 'systemdspawner = systemdspawner:SystemdSpawner', ], }, install_requires=[ 'jupyterhub>=0.9', 'tornado>=5.0' ], )
./CrossVul/dataset_final_sorted/CWE-668/py/good_4374_1
crossvul-python_data_bad_744_1
"""Parse (absolute and relative) URLs. urlparse module is based upon the following RFC specifications. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding and L. Masinter, January 2005. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter and L.Masinter, December 1999. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. Berners-Lee, R. Fielding, and L. Masinter, August 1998. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 1995. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. McCahill, December 1994 RFC 3986 is considered the current standard and any future changes to urlparse module should conform with it. The urlparse module is currently not entirely compliant with this RFC due to defacto scenarios for parsing, and for backward compatibility purposes, some parsing quirks from older RFCs are retained. The testcases in test_urlparse.py provides a good indicator of parsing behavior. """ import re import sys import collections __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", "urlsplit", "urlunsplit", "urlencode", "parse_qs", "parse_qsl", "quote", "quote_plus", "quote_from_bytes", "unquote", "unquote_plus", "unquote_to_bytes", "DefragResult", "ParseResult", "SplitResult", "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] # A classification of schemes. # The empty string classifies URLs with no scheme specified, # being the default value returned by “urlsplit” and “urlparse”. 
uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', 'wais', 'file', 'https', 'shttp', 'mms', 'prospero', 'rtsp', 'rtspu', 'sftp', 'svn', 'svn+ssh', 'ws', 'wss'] uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', 'ws', 'wss'] uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 'mms', 'sftp', 'tel'] # These are not actually used anymore, but should stay for backwards # compatibility. (They are undocumented, but have a public-looking name.) non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', 'gopher', 'rtsp', 'rtspu', 'sip', 'sips'] uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais', 'https', 'shttp', 'snews', 'file', 'prospero'] # Characters valid in scheme names scheme_chars = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789' '+-.') # XXX: Consider replacing with functools.lru_cache MAX_CACHE_SIZE = 20 _parse_cache = {} def clear_cache(): """Clear the parse cache and the quoters cache.""" _parse_cache.clear() _safe_quoters.clear() # Helpers for bytes handling # For 3.2, we deliberately require applications that # handle improperly quoted URLs to do their own # decoding and encoding. 
If valid use cases are # presented, we may relax this by using latin-1 # decoding internally for 3.3 _implicit_encoding = 'ascii' _implicit_errors = 'strict' def _noop(obj): return obj def _encode_result(obj, encoding=_implicit_encoding, errors=_implicit_errors): return obj.encode(encoding, errors) def _decode_args(args, encoding=_implicit_encoding, errors=_implicit_errors): return tuple(x.decode(encoding, errors) if x else '' for x in args) def _coerce_args(*args): # Invokes decode if necessary to create str args # and returns the coerced inputs along with # an appropriate result coercion function # - noop for str inputs # - encoding function otherwise str_input = isinstance(args[0], str) for arg in args[1:]: # We special-case the empty string to support the # "scheme=''" default argument to some functions if arg and isinstance(arg, str) != str_input: raise TypeError("Cannot mix str and non-str arguments") if str_input: return args + (_noop,) return _decode_args(args) + (_encode_result,) # Result objects are more helpful than simple tuples class _ResultMixinStr(object): """Standard approach to encoding parsed results from str to bytes""" __slots__ = () def encode(self, encoding='ascii', errors='strict'): return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) class _ResultMixinBytes(object): """Standard approach to decoding parsed results from bytes to str""" __slots__ = () def decode(self, encoding='ascii', errors='strict'): return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) class _NetlocResultMixinBase(object): """Shared methods for the parsed result objects containing a netloc element""" __slots__ = () @property def username(self): return self._userinfo[0] @property def password(self): return self._userinfo[1] @property def hostname(self): hostname = self._hostinfo[0] if not hostname: return None # Scoped IPv6 address may have zone info, which must not be lowercased # like 
http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys separator = '%' if isinstance(hostname, str) else b'%' hostname, percent, zone = hostname.partition(separator) return hostname.lower() + percent + zone @property def port(self): port = self._hostinfo[1] if port is not None: port = int(port, 10) if not ( 0 <= port <= 65535): raise ValueError("Port out of range 0-65535") return port class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition('@') if have_info: username, have_password, password = userinfo.partition(':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition('@') _, have_open_br, bracketed = hostinfo.partition('[') if have_open_br: hostname, _, port = bracketed.partition(']') _, _, port = port.partition(':') else: hostname, _, port = hostinfo.partition(':') if not port: port = None return hostname, port class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition(b'@') if have_info: username, have_password, password = userinfo.partition(b':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition(b'@') _, have_open_br, bracketed = hostinfo.partition(b'[') if have_open_br: hostname, _, port = bracketed.partition(b']') _, _, port = port.partition(b':') else: hostname, _, port = hostinfo.partition(b':') if not port: port = None return hostname, port from collections import namedtuple _DefragResultBase = namedtuple('DefragResult', 'url fragment') _SplitResultBase = namedtuple( 'SplitResult', 'scheme netloc path query fragment') _ParseResultBase = 
namedtuple( 'ParseResult', 'scheme netloc path params query fragment') _DefragResultBase.__doc__ = """ DefragResult(url, fragment) A 2-tuple that contains the url without fragment identifier and the fragment identifier as a separate argument. """ _DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" _DefragResultBase.fragment.__doc__ = """ Fragment identifier separated from URL, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _SplitResultBase.__doc__ = """ SplitResult(scheme, netloc, path, query, fragment) A 5-tuple that contains the different components of a URL. Similar to ParseResult, but does not split params. """ _SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" _SplitResultBase.netloc.__doc__ = """ Network location where the request is made to. """ _SplitResultBase.path.__doc__ = """ The hierarchical path, such as the path to a file to download. """ _SplitResultBase.query.__doc__ = """ The query component, that contains non-hierarchical data, that along with data in path component, identifies a resource in the scope of URI's scheme and network location. """ _SplitResultBase.fragment.__doc__ = """ Fragment identifier, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _ParseResultBase.__doc__ = """ ParseResult(scheme, netloc, path, params, query, fragment) A 6-tuple that contains components of a parsed URL. """ _ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ _ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ _ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ _ParseResultBase.params.__doc__ = """ Parameters for last path element used to dereference the URI in order to provide access to perform some operation on the resource. 
""" _ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ _ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ # For backwards compatibility, alias _NetlocResultMixinStr # ResultBase is no longer part of the documented API, but it is # retained since deprecating it isn't worth the hassle ResultBase = _NetlocResultMixinStr # Structured result objects for string data class DefragResult(_DefragResultBase, _ResultMixinStr): __slots__ = () def geturl(self): if self.fragment: return self.url + '#' + self.fragment else: return self.url class SplitResult(_SplitResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResult(_ParseResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunparse(self) # Structured result objects for bytes data class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): __slots__ = () def geturl(self): if self.fragment: return self.url + b'#' + self.fragment else: return self.url class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunparse(self) # Set up the encode/decode result pairs def _fix_result_transcoding(): _result_pairs = ( (DefragResult, DefragResultBytes), (SplitResult, SplitResultBytes), (ParseResult, ParseResultBytes), ) for _decoded, _encoded in _result_pairs: _decoded._encoded_counterpart = _encoded _encoded._decoded_counterpart = _decoded _fix_result_transcoding() del _fix_result_transcoding def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) splitresult = urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = splitresult if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' result = ParseResult(scheme, netloc, url, params, query, fragment) return _coerce_result(result) def _splitparams(url): if '/' in url: i = url.find(';', url.rfind('/')) if i < 0: return url, '' else: i = url.find(';') return url[:i], url[i+1:] def _splitnetloc(url, start=0): delim = len(url) # position of end of domain part of url, default is end for c in '/?#': # look for delimiters; the order is NOT important wdelim = url.find(c, start) # find first of this delim if wdelim >= 0: # if found delim = min(delim, wdelim) # use earliest delim position return url[start:delim], url[delim:] # return (domain, rest) def _checknetloc(netloc): if not netloc or not any(ord(c) > 127 for c in netloc): return # looking for characters like \u2100 that expand to 'a/c' # IDNA uses NFKC equivalence, so normalize for this check import unicodedata n = netloc.rpartition('@')[2] # ignore anything to the left of '@' n = n.replace(':', '') # ignore characters already included n = n.replace('#', '') # but not the surrounding text n = n.replace('?', '') netloc2 = unicodedata.normalize('NFKC', n) if n == netloc2: return for c in '/?#@:': if c in netloc2: raise ValueError("netloc '" + netloc + "' contains invalid " + "characters under NFKC normalization") def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> Return a 5-tuple: (scheme, netloc, path, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) allow_fragments = bool(allow_fragments) key = url, scheme, allow_fragments, type(url), type(scheme) cached = _parse_cache.get(key, None) if cached: return _coerce_result(cached) if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case scheme = url[:i].lower() url = url[i+1:] if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) _checknetloc(netloc) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) for c in url[:i]: if c not in scheme_chars: break else: # make sure "url" is not actually a port number (in which case # "scheme" is really part of the path) rest = url[i+1:] if not rest or any(c not in '0123456789' for c in rest): # not a port number scheme, url = url[:i].lower(), rest if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) _checknetloc(netloc) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) def urlunparse(components): """Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? 
with an empty query (the draft states that these are equivalent).""" scheme, netloc, url, params, query, fragment, _coerce_result = ( _coerce_args(*components)) if params: url = "%s;%s" % (url, params) return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) def urlunsplit(components): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" scheme, netloc, url, query, fragment, _coerce_result = ( _coerce_args(*components)) if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if query: url = url + '?' + query if fragment: url = url + '#' + fragment return _coerce_result(url) def urljoin(base, url, allow_fragments=True): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url if not url: return base base, url, _coerce_result = _coerce_args(base, url) bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return _coerce_result(url) if scheme in uses_netloc: if netloc: return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) netloc = bnetloc if not path and not params: path = bpath params = bparams if not query: query = bquery return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) base_parts = bpath.split('/') if base_parts[-1] != '': # the last item is not a directory, so will not be taken into account # in resolving the relative path 
del base_parts[-1] # for rfc3986, ignore all base path should the first character be root. if path[:1] == '/': segments = path.split('/') else: segments = base_parts + path.split('/') # filter out elements that would cause redundant slashes on re-joining # the resolved_path segments[1:-1] = filter(None, segments[1:-1]) resolved_path = [] for seg in segments: if seg == '..': try: resolved_path.pop() except IndexError: # ignore any .. segments that would otherwise cause an IndexError # when popped from resolved_path if resolving for rfc3986 pass elif seg == '.': continue else: resolved_path.append(seg) if segments[-1] in ('.', '..'): # do some post-processing here. if the last segment was a relative dir, # then we need to append the trailing '/' resolved_path.append('') return _coerce_result(urlunparse((scheme, netloc, '/'.join( resolved_path) or '/', params, query, fragment))) def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ url, _coerce_result = _coerce_args(url) if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) else: frag = '' defrag = url return _coerce_result(DefragResult(defrag, frag)) _hexdig = '0123456789ABCDEFabcdef' _hextobyte = None def unquote_to_bytes(string): """unquote_to_bytes('abc%20def') -> b'abc def'.""" # Note: strings are encoded as UTF-8. This is only an issue if it contains # unescaped non-ASCII characters, which URIs should not. if not string: # Is it a string-like object? 
string.split return b'' if isinstance(string, str): string = string.encode('utf-8') bits = string.split(b'%') if len(bits) == 1: return string res = [bits[0]] append = res.append # Delay the initialization of the table to not waste memory # if the function is never called global _hextobyte if _hextobyte is None: _hextobyte = {(a + b).encode(): bytes([int(a + b, 16)]) for a in _hexdig for b in _hexdig} for item in bits[1:]: try: append(_hextobyte[item[:2]]) append(item[2:]) except KeyError: append(b'%') append(item) return b''.join(res) _asciire = re.compile('([\x00-\x7f]+)') def unquote(string, encoding='utf-8', errors='replace'): """Replace %xx escapes by their single-character equivalent. The optional encoding and errors parameters specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. By default, percent-encoded sequences are decoded with UTF-8, and invalid sequences are replaced by a placeholder character. unquote('abc%20def') -> 'abc def'. """ if '%' not in string: string.split return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' bits = _asciire.split(string) res = [bits[0]] append = res.append for i in range(1, len(bits), 2): append(unquote_to_bytes(bits[i]).decode(encoding, errors)) append(bits[i + 1]) return ''.join(res) def parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. 
If true, errors raise a ValueError exception. encoding and errors: specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. max_num_fields: int. If set, then throws a ValueError if there are more than n fields read by parse_qsl(). Returns a dictionary. """ parsed_result = {} pairs = parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors, max_num_fields=max_num_fields) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. encoding and errors: specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. max_num_fields: int. If set, then throws a ValueError if there are more than n fields read by parse_qsl(). Returns a list, as G-d intended. """ qs, _coerce_result = _coerce_args(qs) # If max_num_fields is defined then check that the number of fields # is less than max_num_fields. This prevents a memory exhaustion DOS # attack via post bodies with many fields. 
if max_num_fields is not None: num_fields = 1 + qs.count('&') + qs.count(';') if max_num_fields < num_fields: raise ValueError('Max number of fields exceeded') pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def unquote_plus(string, encoding='utf-8', errors='replace'): """Like unquote(), but also replace plus signs by spaces, as required for unquoting HTML form values. unquote_plus('%7e/abc+def') -> '~/abc def' """ string = string.replace('+', ' ') return unquote(string, encoding, errors) _ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' b'abcdefghijklmnopqrstuvwxyz' b'0123456789' b'_.-') _ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE) _safe_quoters = {} class Quoter(collections.defaultdict): """A mapping from bytes (in range(0,256)) to strings. String values are percent-encoded byte values, unless the key < 128, and in the "safe" set (either the specified safe set, or default set). """ # Keeps a cache internally, using defaultdict, for efficiency (lookups # of cached keys don't call Python code at all). def __init__(self, safe): """safe: bytes object.""" self.safe = _ALWAYS_SAFE.union(safe) def __repr__(self): # Without this, will just display as a defaultdict return "<%s %r>" % (self.__class__.__name__, dict(self)) def __missing__(self, b): # Handle a cache miss. Store quoted string in cache and return. 
res = chr(b) if b in self.safe else '%{:02X}'.format(b) self[b] = res return res def quote(string, safe='/', encoding=None, errors=None): """quote('abc def') -> 'abc%20def' Each part of a URL, e.g. the path info, the query, etc., has a different set of reserved characters that must be quoted. RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists the following reserved characters. reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," Each of these characters is reserved in some component of a URL, but not necessarily in all of them. By default, the quote function is intended for quoting the path section of a URL. Thus, it will not encode '/'. This character is reserved, but in typical usage the quote function is being called on a path where the existing slash characters are used as reserved characters. string and safe may be either str or bytes objects. encoding and errors must not be specified if string is a bytes object. The optional encoding and errors parameters specify how to deal with non-ASCII characters, as accepted by the str.encode method. By default, encoding='utf-8' (characters are encoded with UTF-8), and errors='strict' (unsupported characters raise a UnicodeEncodeError). """ if isinstance(string, str): if not string: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'strict' string = string.encode(encoding, errors) else: if encoding is not None: raise TypeError("quote() doesn't support 'encoding' for bytes") if errors is not None: raise TypeError("quote() doesn't support 'errors' for bytes") return quote_from_bytes(string, safe) def quote_plus(string, safe='', encoding=None, errors=None): """Like quote(), but also replace ' ' with '+', as required for quoting HTML form values. Plus signs in the original string are escaped unless they are included in safe. It also does not have safe default to '/'. """ # Check if ' ' in string, where string may either be a str or bytes. 
If # there are no spaces, the regular quote will produce the right answer. if ((isinstance(string, str) and ' ' not in string) or (isinstance(string, bytes) and b' ' not in string)): return quote(string, safe, encoding, errors) if isinstance(safe, str): space = ' ' else: space = b' ' string = quote(string, safe + space, encoding, errors) return string.replace(' ', '+') def quote_from_bytes(bs, safe='/'): """Like quote(), but accepts a bytes object rather than a str, and does not perform string-to-bytes encoding. It always returns an ASCII string. quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' """ if not isinstance(bs, (bytes, bytearray)): raise TypeError("quote_from_bytes() expected bytes") if not bs: return '' if isinstance(safe, str): # Normalize 'safe' by converting to bytes and removing non-ASCII chars safe = safe.encode('ascii', 'ignore') else: safe = bytes([c for c in safe if c < 128]) if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): return bs.decode() try: quoter = _safe_quoters[safe] except KeyError: _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ return ''.join([quoter(char) for char in bs]) def urlencode(query, doseq=False, safe='', encoding=None, errors=None, quote_via=quote_plus): """Encode a dict or sequence of two-element tuples into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. The components of a query arg may each be either a string or a bytes type. The safe, encoding, and errors parameters are passed down to the function specified by quote_via (encoding and errors only if a component is a str). """ if hasattr(query, "items"): query = query.items() else: # It's a bother at times that strings and string-like objects are # sequences. 
try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and not isinstance(query[0], tuple): raise TypeError # Zero-length sequences of all types will get here and succeed, # but that's a minor nit. Since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: ty, va, tb = sys.exc_info() raise TypeError("not a valid non-string sequence " "or mapping object").with_traceback(tb) l = [] if not doseq: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) else: v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) l.append(k + '=' + v) elif isinstance(v, str): v = quote_via(v, safe, encoding, errors) l.append(k + '=' + v) else: try: # Is this a sufficient test for sequence-ness? x = len(v) except TypeError: # not a sequence v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: # loop over the sequence for elt in v: if isinstance(elt, bytes): elt = quote_via(elt, safe) else: elt = quote_via(str(elt), safe, encoding, errors) l.append(k + '=' + elt) return '&'.join(l) def to_bytes(url): """to_bytes(u"URL") --> 'URL'.""" # Most URL schemes require ASCII. If that changes, the conversion # can be relaxed. 
# XXX get rid of to_bytes() if isinstance(url, str): try: url = url.encode("ASCII").decode() except UnicodeError: raise UnicodeError("URL " + repr(url) + " contains non-ASCII characters") return url def unwrap(url): """unwrap('<URL:type://host/path>') --> 'type://host/path'.""" url = str(url).strip() if url[:1] == '<' and url[-1:] == '>': url = url[1:-1].strip() if url[:4] == 'URL:': url = url[4:].strip() return url _typeprog = None def splittype(url): """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" global _typeprog if _typeprog is None: _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) match = _typeprog.match(url) if match: scheme, data = match.groups() return scheme.lower(), data return None, url _hostprog = None def splithost(url): """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" global _hostprog if _hostprog is None: _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL) match = _hostprog.match(url) if match: host_port, path = match.groups() if path and path[0] != '/': path = '/' + path return host_port, path return None, url def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" user, delim, host = host.rpartition('@') return (user if delim else None), host def splitpasswd(user): """splitpasswd('user:passwd') -> 'user', 'passwd'.""" user, delim, passwd = user.partition(':') return user, (passwd if delim else None) # splittag('/path#tag') --> '/path', 'tag' _portprog = None def splitport(host): """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: _portprog = re.compile('(.*):([0-9]*)$', re.DOTALL) match = _portprog.match(host) if match: host, port = match.groups() if port: return host, port return host, None def splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number are found after ':'. 
Return None if ':' but not a valid number.""" host, delim, port = host.rpartition(':') if not delim: host = port elif port: try: nport = int(port) except ValueError: nport = None return host, nport return host, defport def splitquery(url): """splitquery('/path?query') --> '/path', 'query'.""" path, delim, query = url.rpartition('?') if delim: return path, query return url, None def splittag(url): """splittag('/path#tag') --> '/path', 'tag'.""" path, delim, tag = url.rpartition('#') if delim: return path, tag return url, None def splitattr(url): """splitattr('/path;attr1=value1;attr2=value2;...') -> '/path', ['attr1=value1', 'attr2=value2', ...].""" words = url.split(';') return words[0], words[1:] def splitvalue(attr): """splitvalue('attr=value') --> 'attr', 'value'.""" attr, delim, value = attr.partition('=') return attr, (value if delim else None)
./CrossVul/dataset_final_sorted/CWE-255/py/bad_744_1
crossvul-python_data_good_3793_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the EC2 Credentials service. This service allows the creation of access/secret credentials used for the ec2 interop layer of OpenStack. A user can create as many access/secret pairs, each of which map to a specific tenant. This is required because OpenStack supports a user belonging to multiple tenants, whereas the signatures created on ec2-style requests don't allow specification of which tenant the user wishs to act upon. To complete the cycle, we provide a method that OpenStack services can use to validate a signature and get a corresponding openstack token. This token allows method calls to other services within the context the access/secret was created. As an example, nova requests keystone to validate the signature of a request, receives a token, and then makes a request to glance to list images needed to perform the requested task. """ import uuid from keystone import catalog from keystone.common import manager from keystone.common import utils from keystone.common import wsgi from keystone import config from keystone import exception from keystone import identity from keystone import policy from keystone import service from keystone import token CONF = config.CONF class Manager(manager.Manager): """Default pivot point for the EC2 Credentials backend. 
See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.ec2.driver) class Ec2Extension(wsgi.ExtensionRouter): def add_routes(self, mapper): ec2_controller = Ec2Controller() # validation mapper.connect( '/ec2tokens', controller=ec2_controller, action='authenticate', conditions=dict(method=['POST'])) # crud mapper.connect( '/users/{user_id}/credentials/OS-EC2', controller=ec2_controller, action='create_credential', conditions=dict(method=['POST'])) mapper.connect( '/users/{user_id}/credentials/OS-EC2', controller=ec2_controller, action='get_credentials', conditions=dict(method=['GET'])) mapper.connect( '/users/{user_id}/credentials/OS-EC2/{credential_id}', controller=ec2_controller, action='get_credential', conditions=dict(method=['GET'])) mapper.connect( '/users/{user_id}/credentials/OS-EC2/{credential_id}', controller=ec2_controller, action='delete_credential', conditions=dict(method=['DELETE'])) class Ec2Controller(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() self.ec2_api = Manager() super(Ec2Controller, self).__init__() def check_signature(self, creds_ref, credentials): signer = utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) if utils.auth_str_equal(credentials['signature'], signature): return # NOTE(vish): Some libraries don't use the port when signing # requests, so try again without port. 
elif ':' in credentials['signature']: hostname, _port = credentials['host'].split(':') credentials['host'] = hostname signature = signer.generate(credentials) if not utils.auth_str_equal(credentials.signature, signature): raise exception.Unauthorized(message='Invalid EC2 signature.') else: raise exception.Unauthorized(message='EC2 signature not supplied.') def authenticate(self, context, credentials=None, ec2Credentials=None): """Validate a signed EC2 request and provide a token. Other services (such as Nova) use this **admin** call to determine if a request they signed received is from a valid user. If it is a valid signature, an openstack token that maps to the user/tenant is returned to the caller, along with all the other details returned from a normal token validation call. The returned token is useful for making calls to other OpenStack services within the context of the request. :param context: standard context :param credentials: dict of ec2 signature :param ec2Credentials: DEPRECATED dict of ec2 signature :returns: token: openstack token equivalent to access key along with the corresponding service catalog and roles """ # FIXME(ja): validate that a service token was used! 
# NOTE(termie): backwards compat hack if not credentials and ec2Credentials: credentials = ec2Credentials if not 'access' in credentials: raise exception.Unauthorized(message='EC2 signature not supplied.') creds_ref = self._get_credentials(context, credentials['access']) self.check_signature(creds_ref, credentials) # TODO(termie): don't create new tokens every time # TODO(termie): this is copied from TokenController.authenticate token_id = uuid.uuid4().hex tenant_ref = self.identity_api.get_tenant( context=context, tenant_id=creds_ref['tenant_id']) user_ref = self.identity_api.get_user( context=context, user_id=creds_ref['user_id']) metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles = metadata_ref.get('roles', []) if not roles: raise exception.Unauthorized(message='User not valid for tenant.') roles_ref = [self.identity_api.get_role(context, role_id) for role_id in roles] catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) # TODO(termie): make this a util function or something # TODO(termie): i don't think the ec2 middleware currently expects a # full return, but it contains a note saying that it # would be better to expect a full return token_controller = service.TokenController() return token_controller._format_authenticate( token_ref, roles_ref, catalog_ref) def create_credential(self, context, user_id, tenant_id): """Create a secret/access pair for use with ec2 style auth. Generates a new set of credentials that map the the user/tenant pair. 
:param context: standard context :param user_id: id of user :param tenant_id: id of tenant :returns: credential: dict of ec2 credential """ if not self._is_admin(context): self._assert_identity(context, user_id) self._assert_valid_user_id(context, user_id) self._assert_valid_tenant_id(context, tenant_id) cred_ref = {'user_id': user_id, 'tenant_id': tenant_id, 'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex} self.ec2_api.create_credential(context, cred_ref['access'], cred_ref) return {'credential': cred_ref} def get_credentials(self, context, user_id): """List all credentials for a user. :param context: standard context :param user_id: id of user :returns: credentials: list of ec2 credential dicts """ if not self._is_admin(context): self._assert_identity(context, user_id) self._assert_valid_user_id(context, user_id) return {'credentials': self.ec2_api.list_credentials(context, user_id)} def get_credential(self, context, user_id, credential_id): """Retreive a user's access/secret pair by the access key. Grab the full access/secret pair for a given access key. :param context: standard context :param user_id: id of user :param credential_id: access key for credentials :returns: credential: dict of ec2 credential """ if not self._is_admin(context): self._assert_identity(context, user_id) self._assert_valid_user_id(context, user_id) creds = self._get_credentials(context, credential_id) return {'credential': creds} def delete_credential(self, context, user_id, credential_id): """Delete a user's access/secret pair. 
Used to revoke a user's access/secret pair :param context: standard context :param user_id: id of user :param credential_id: access key for credentials :returns: bool: success """ if not self._is_admin(context): self._assert_identity(context, user_id) self._assert_owner(context, user_id, credential_id) self._assert_valid_user_id(context, user_id) self._get_credentials(context, credential_id) return self.ec2_api.delete_credential(context, credential_id) def _get_credentials(self, context, credential_id): """Return credentials from an ID. :param context: standard context :param credential_id: id of credential :raises exception.Unauthorized: when credential id is invalid :returns: credential: dict of ec2 credential. """ creds = self.ec2_api.get_credential(context, credential_id) if not creds: raise exception.Unauthorized(message='EC2 access key not found.') return creds def _assert_identity(self, context, user_id): """Check that the provided token belongs to the user. :param context: standard context :param user_id: id of user :raises exception.Forbidden: when token is invalid """ try: token_ref = self.token_api.get_token( context=context, token_id=context['token_id']) except exception.TokenNotFound: raise exception.Unauthorized() token_user_id = token_ref['user'].get('id') if not token_user_id == user_id: raise exception.Forbidden() def _is_admin(self, context): """Wrap admin assertion error return statement. :param context: standard context :returns: bool: success """ try: self.assert_admin(context) return True except exception.Forbidden: return False def _assert_owner(self, context, user_id, credential_id): """Ensure the provided user owns the credential. 
:param context: standard context :param user_id: expected credential owner :param credential_id: id of credential object :raises exception.Forbidden: on failure """ cred_ref = self.ec2_api.get_credential(context, credential_id) if not user_id == cred_ref['user_id']: raise exception.Forbidden() def _assert_valid_user_id(self, context, user_id): """Ensure a valid user id. :param context: standard context :param user_id: expected credential owner :raises exception.UserNotFound: on failure """ user_ref = self.identity_api.get_user( context=context, user_id=user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) def _assert_valid_tenant_id(self, context, tenant_id): """Ensure a valid tenant id. :param context: standard context :param user_id: expected credential owner :raises exception.UserNotFound: on failure """ tenant_ref = self.identity_api.get_tenant( context=context, tenant_id=tenant_id) if not tenant_ref: raise exception.TenantNotFound(tenant_id=tenant_id)
./CrossVul/dataset_final_sorted/CWE-255/py/good_3793_0
crossvul-python_data_bad_3788_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import routes
import json

from keystone import config
from keystone import catalog
from keystone.common import cms
from keystone.common import logging
from keystone.common import wsgi
from keystone import exception
from keystone import identity
from keystone.openstack.common import timeutils
from keystone import policy
from keystone import token


LOG = logging.getLogger(__name__)


class V3Router(wsgi.ComposingRouter):
    """Router wiring up the Identity API v3 CRUD and grant routes."""

    def crud_routes(self, mapper, controller, collection_key, key):
        """Connect the standard POST/GET/PATCH/DELETE routes for a
        collection (``/<collection>``) and its entities
        (``/<collection>/{<key>_id}``)."""
        collection_path = '/%(collection_key)s' % {
            'collection_key': collection_key}
        entity_path = '/%(collection_key)s/{%(key)s_id}' % {
            'collection_key': collection_key,
            'key': key}

        mapper.connect(
            collection_path,
            controller=controller,
            action='create_%s' % key,
            conditions=dict(method=['POST']))
        mapper.connect(
            collection_path,
            controller=controller,
            action='list_%s' % collection_key,
            conditions=dict(method=['GET']))
        mapper.connect(
            entity_path,
            controller=controller,
            action='get_%s' % key,
            conditions=dict(method=['GET']))
        mapper.connect(
            entity_path,
            controller=controller,
            action='update_%s' % key,
            conditions=dict(method=['PATCH']))
        mapper.connect(
            entity_path,
            controller=controller,
            action='delete_%s' % key,
            conditions=dict(method=['DELETE']))

    def __init__(self):
        mapper = routes.Mapper()

        apis = dict(
            catalog_api=catalog.Manager(),
            identity_api=identity.Manager(),
            policy_api=policy.Manager(),
            token_api=token.Manager())

        # Catalog

        self.crud_routes(
            mapper,
            catalog.ServiceControllerV3(**apis),
            'services',
            'service')

        self.crud_routes(
            mapper,
            catalog.EndpointControllerV3(**apis),
            'endpoints',
            'endpoint')

        # Identity

        self.crud_routes(
            mapper,
            identity.DomainControllerV3(**apis),
            'domains',
            'domain')

        project_controller = identity.ProjectControllerV3(**apis)
        self.crud_routes(
            mapper,
            project_controller,
            'projects',
            'project')
        mapper.connect(
            '/users/{user_id}/projects',
            controller=project_controller,
            action='list_user_projects',
            conditions=dict(method=['GET']))

        self.crud_routes(
            mapper,
            identity.UserControllerV3(**apis),
            'users',
            'user')

        self.crud_routes(
            mapper,
            identity.CredentialControllerV3(**apis),
            'credentials',
            'credential')

        role_controller = identity.RoleControllerV3(**apis)
        self.crud_routes(
            mapper,
            role_controller,
            'roles',
            'role')

        # Role grants on projects and on domains share a controller; the
        # route shape (project vs domain prefix) selects the grant target.
        mapper.connect(
            '/projects/{project_id}/users/{user_id}/roles/{role_id}',
            controller=role_controller,
            action='create_grant',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/projects/{project_id}/users/{user_id}/roles/{role_id}',
            controller=role_controller,
            action='check_grant',
            conditions=dict(method=['HEAD']))
        mapper.connect(
            '/projects/{project_id}/users/{user_id}/roles',
            controller=role_controller,
            action='list_grants',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/projects/{project_id}/users/{user_id}/roles/{role_id}',
            controller=role_controller,
            action='revoke_grant',
            conditions=dict(method=['DELETE']))
        mapper.connect(
            '/domains/{domain_id}/users/{user_id}/roles/{role_id}',
            controller=role_controller,
            action='create_grant',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/domains/{domain_id}/users/{user_id}/roles/{role_id}',
            controller=role_controller,
            action='check_grant',
            conditions=dict(method=['HEAD']))
        mapper.connect(
            '/domains/{domain_id}/users/{user_id}/roles',
            controller=role_controller,
            action='list_grants',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/domains/{domain_id}/users/{user_id}/roles/{role_id}',
            controller=role_controller,
            action='revoke_grant',
            conditions=dict(method=['DELETE']))

        # Policy

        policy_controller = policy.PolicyControllerV3(**apis)
        self.crud_routes(
            mapper,
            policy_controller,
            'policies',
            'policy')

        # Token

        """
        # v2.0 LEGACY
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token_head',
                       conditions=dict(method=['HEAD']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='delete_token',
                       conditions=dict(method=['DELETE']))
        mapper.connect('/tokens/{token_id}/endpoints',
                       controller=auth_controller,
                       action='endpoints',
                       conditions=dict(method=['GET']))
        """

        super(V3Router, self).__init__(mapper, [])


class AdminRouter(wsgi.ComposingRouter):
    """Router for the v2.0 admin API (token CRUD, certs, extensions)."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))
        mapper.connect('/tokens/revoked',
                       controller=auth_controller,
                       action='revocation_list',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token_head',
                       conditions=dict(method=['HEAD']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='delete_token',
                       conditions=dict(method=['DELETE']))
        mapper.connect('/tokens/{token_id}/endpoints',
                       controller=auth_controller,
                       action='endpoints',
                       conditions=dict(method=['GET']))

        # Certificates used to verify auth tokens
        mapper.connect('/certificates/ca',
                       controller=auth_controller,
                       action='ca_cert',
                       conditions=dict(method=['GET']))
        mapper.connect('/certificates/signing',
                       controller=auth_controller,
                       action='signing_cert',
                       conditions=dict(method=['GET']))

        # Miscellaneous Operations
        extensions_controller = AdminExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.AdminRouter()
        routers = [identity_router]
        super(AdminRouter, self).__init__(mapper, routers)


class PublicRouter(wsgi.ComposingRouter):
    """Router for the v2.0 public API (authenticate, certs, extensions)."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))
        mapper.connect('/certificates/ca',
                       controller=auth_controller,
                       action='ca_cert',
                       conditions=dict(method=['GET']))
        mapper.connect('/certificates/signing',
                       controller=auth_controller,
                       action='signing_cert',
                       conditions=dict(method=['GET']))

        # Miscellaneous
        extensions_controller = PublicExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.PublicRouter()
        routers = [identity_router]
        super(PublicRouter, self).__init__(mapper, routers)


class PublicVersionRouter(wsgi.ComposingRouter):
    """Router that only serves the version list on the public endpoint."""

    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(PublicVersionRouter, self).__init__(mapper, routers)


class AdminVersionRouter(wsgi.ComposingRouter):
    """Router that only serves the version list on the admin endpoint."""

    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(AdminVersionRouter, self).__init__(mapper, routers)


class VersionController(wsgi.Application):
    """Serves version discovery documents for one endpoint type.

    ``version_type`` is ``'public'`` or ``'admin'``; it selects which
    catalog URL key (``publicURL``/``adminURL``) is used to build the
    self link.
    """

    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        self.url_key = '%sURL' % version_type
        super(VersionController, self).__init__()

    def _get_identity_url(self, context):
        """Return the identity service URL from the catalog, or raise
        NotImplemented if no identity service is registered."""
        catalog_ref = self.catalog_api.get_catalog(context=context,
                                                   user_id=None,
                                                   tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]

        raise exception.NotImplemented()

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'

        versions = {}
        versions['v2.0'] = {
            'id': 'v2.0',
            'status': 'beta',
            'updated': '2011-11-19T00:00:00Z',
            'links': [
                {
                    'rel': 'self',
                    'href': identity_url,
                }, {
                    'rel': 'describedby',
                    'type': 'text/html',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/content/'
                }, {
                    'rel': 'describedby',
                    'type': 'application/pdf',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/identity-dev-guide-'
                            '2.0.pdf'
                }
            ],
            'media-types': [
                {
                    'base': 'application/json',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+json'
                }, {
                    'base': 'application/xml',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+xml'
                }
            ]
        }

        return versions

    def get_versions(self, context):
        versions = self._get_versions_list(context)
        # 300 Multiple Choices is the spec-mandated status for the list.
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            'versions': {
                'values': versions.values()
            }
        })

    def get_version(self, context):
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            'version': versions['v2.0']
        })


class NoopController(wsgi.Application):
    """Controller that accepts any request and returns an empty body."""

    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        return {}


class ExternalAuthNotApplicable(Exception):
    """External authentication is not applicable"""


class TokenController(wsgi.Application):
    """v2.0 token operations: authenticate, validate, revoke, endpoints."""

    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()

    def ca_cert(self, context, auth=None):
        """Return the raw PEM CA certificate used for token verification."""
        ca_file = open(config.CONF.signing.ca_certs, 'r')
        data = ca_file.read()
        ca_file.close()
        return data

    def signing_cert(self, context, auth=None):
        """Return the raw PEM signing certificate for PKI tokens."""
        cert_file = open(config.CONF.signing.certfile, 'r')
        data = cert_file.read()
        cert_file.close()
        return data

    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        if auth is None:
            raise exception.ValidationError(attribute='auth',
                                            target='request body')

        auth_token_data = None

        # Dispatch on credential type: token chaining, then external
        # (REMOTE_USER), then local password authentication.
        if "token" in auth:
            # Try to authenticate using a token
            auth_token_data, auth_info = self._authenticate_token(
                context, auth)
        else:
            # Try external authentication
            try:
                auth_token_data, auth_info = self._authenticate_external(
                    context, auth)
            except ExternalAuthNotApplicable:
                # Try local authentication
                auth_token_data, auth_info = self._authenticate_local(
                    context, auth)

        user_ref, tenant_ref, metadata_ref = auth_info

        # If the user is disabled don't allow them to authenticate
        if not user_ref.get('enabled', True):
            msg = 'User is disabled: %s' % user_ref['id']
            LOG.warning(msg)
            raise exception.Unauthorized(msg)

        # If the tenant is disabled don't allow them to authenticate
        if tenant_ref and not tenant_ref.get('enabled', True):
            msg = 'Tenant is disabled: %s' % tenant_ref['id']
            LOG.warning(msg)
            raise exception.Unauthorized(msg)

        if tenant_ref:
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=user_ref['id'],
                tenant_id=tenant_ref['id'],
                metadata=metadata_ref)
        else:
            catalog_ref = {}

        # The real id is filled in after signing/generation below.
        auth_token_data['id'] = 'placeholder'

        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            role_ref = self.identity_api.get_role(context, role_id)
            roles_ref.append(dict(name=role_ref['name']))

        token_data = self._format_token(auth_token_data, roles_ref)

        service_catalog = self._format_catalog(catalog_ref)
        token_data['access']['serviceCatalog'] = service_catalog

        if config.CONF.signing.token_format == 'UUID':
            token_id = uuid.uuid4().hex
        elif config.CONF.signing.token_format == 'PKI':
            token_id = cms.cms_sign_token(json.dumps(token_data),
                                          config.CONF.signing.certfile,
                                          config.CONF.signing.keyfile)
        else:
            raise exception.UnexpectedError(
                'Invalid value for token_format: %s.'
                ' Allowed values are PKI or UUID.' %
                config.CONF.signing.token_format)

        try:
            self.token_api.create_token(
                context, token_id, dict(key=token_id,
                                        id=token_id,
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))
        except Exception as e:
            # an identical token may have been created already.
            # if so, return the token_data as it is also identical
            try:
                self.token_api.get_token(context=context,
                                         token_id=token_id)
            except exception.TokenNotFound:
                raise e

        token_data['access']['token']['id'] = token_id

        return token_data

    def _authenticate_token(self, context, auth):
        """Try to authenticate using an already existing token.

        Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
        """
        if 'token' not in auth:
            raise exception.ValidationError(
                attribute='token', target='auth')

        if "id" not in auth['token']:
            raise exception.ValidationError(
                attribute="id", target="token")

        old_token = auth['token']['id']

        try:
            old_token_ref = self.token_api.get_token(context=context,
                                                     token_id=old_token)
        except exception.NotFound as e:
            raise exception.Unauthorized(e)

        user_ref = old_token_ref['user']
        user_id = user_ref['id']

        # Re-fetch the user so the caller sees current state (e.g. the
        # 'enabled' flag) rather than the snapshot stored in the old token.
        current_user_ref = self.identity_api.get_user(context=context,
                                                      user_id=user_id)

        tenant_id = self._get_tenant_id_from_auth(context, auth)

        tenant_ref = self._get_tenant_ref(context, user_id, tenant_id)
        metadata_ref = self._get_metadata_ref(context, user_id, tenant_id)

        # New token inherits the old token's expiry (no lifetime extension).
        expiry = old_token_ref['expires']
        auth_token_data = self._get_auth_token_data(current_user_ref,
                                                    tenant_ref,
                                                    metadata_ref,
                                                    expiry)

        return auth_token_data, (current_user_ref, tenant_ref, metadata_ref)

    def _authenticate_local(self, context, auth):
        """Try to authenticate against the identity backend.

        Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
        """
        if 'passwordCredentials' not in auth:
            raise exception.ValidationError(
                attribute='passwordCredentials', target='auth')

        if "password" not in auth['passwordCredentials']:
            raise exception.ValidationError(
                attribute='password', target='passwordCredentials')

        password = auth['passwordCredentials']['password']

        if ("userId" not in auth['passwordCredentials'] and
                "username" not in auth['passwordCredentials']):
            raise exception.ValidationError(
                attribute='username or userId',
                target='passwordCredentials')

        user_id = auth['passwordCredentials'].get('userId', None)
        username = auth['passwordCredentials'].get('username', '')

        # A username, when given, takes precedence and is resolved to an id.
        if username:
            try:
                user_ref = self.identity_api.get_user_by_name(
                    context=context, user_name=username)
                user_id = user_ref['id']
            except exception.UserNotFound as e:
                raise exception.Unauthorized(e)

        tenant_id = self._get_tenant_id_from_auth(context, auth)

        try:
            auth_info = self.identity_api.authenticate(
                context=context,
                user_id=user_id,
                password=password,
                tenant_id=tenant_id)
        except AssertionError as e:
            # The identity backend signals bad credentials via assertion.
            raise exception.Unauthorized(e)

        (user_ref, tenant_ref, metadata_ref) = auth_info

        expiry = self.token_api._get_default_expire_time(context=context)
        auth_token_data = self._get_auth_token_data(user_ref,
                                                    tenant_ref,
                                                    metadata_ref,
                                                    expiry)

        return auth_token_data, (user_ref, tenant_ref, metadata_ref)

    def _authenticate_external(self, context, auth):
        """Try to authenticate an external user via REMOTE_USER variable.

        Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
        """
        if 'REMOTE_USER' not in context:
            raise ExternalAuthNotApplicable()

        username = context['REMOTE_USER']
        try:
            user_ref = self.identity_api.get_user_by_name(
                context=context, user_name=username)
            user_id = user_ref['id']
        except exception.UserNotFound as e:
            raise exception.Unauthorized(e)

        tenant_id = self._get_tenant_id_from_auth(context, auth)
        tenant_ref = self._get_tenant_ref(context, user_id, tenant_id)
        metadata_ref = self._get_metadata_ref(context, user_id, tenant_id)

        expiry = self.token_api._get_default_expire_time(context=context)
        auth_token_data = self._get_auth_token_data(user_ref,
                                                    tenant_ref,
                                                    metadata_ref,
                                                    expiry)
        return auth_token_data, (user_ref, tenant_ref, metadata_ref)

    def _get_auth_token_data(self, user, tenant, metadata, expiry):
        """Assemble the raw token payload dict."""
        # Original wrapped this in a redundant dict(dict(...)); one dict()
        # builds the identical mapping.
        return dict(user=user,
                    tenant=tenant,
                    metadata=metadata,
                    expires=expiry)

    def _get_tenant_id_from_auth(self, context, auth):
        """Extract tenant information from auth dict.

        Returns a valid tenant_id if it exists, or None if not specified.
        """
        tenant_id = auth.get('tenantId', None)
        tenant_name = auth.get('tenantName', None)
        # A tenantName, when given, takes precedence over tenantId.
        if tenant_name:
            try:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context, tenant_name=tenant_name)
                tenant_id = tenant_ref['id']
            except exception.TenantNotFound as e:
                raise exception.Unauthorized(e)
        return tenant_id

    def _get_tenant_ref(self, context, user_id, tenant_id):
        """Returns the tenant_ref for the user's tenant.

        Raises Unauthorized if the user is not a member of the tenant or
        the tenant cannot be found.
        """
        tenant_ref = None
        if tenant_id:
            tenants = self.identity_api.get_tenants_for_user(context, user_id)
            if tenant_id not in tenants:
                msg = 'User %s is unauthorized for tenant %s' % (
                    user_id, tenant_id)
                LOG.warning(msg)
                raise exception.Unauthorized(msg)

            try:
                tenant_ref = self.identity_api.get_tenant(
                    context=context, tenant_id=tenant_id)
            except exception.TenantNotFound as e:
                # BUG FIX: the original instantiated Unauthorized(e) but
                # never raised it, silently swallowing the lookup failure
                # and returning tenant_ref = None.
                raise exception.Unauthorized(e)
        return tenant_ref

    def _get_metadata_ref(self, context, user_id, tenant_id):
        """Returns the metadata_ref for a user in a tenant.

        Missing metadata is treated as an empty mapping, not an error.
        """
        metadata_ref = {}
        if tenant_id:
            try:
                metadata_ref = self.identity_api.get_metadata(
                    context=context,
                    user_id=user_id,
                    tenant_id=tenant_id)
            except exception.MetadataNotFound:
                metadata_ref = {}
        return metadata_ref

    def _get_token_ref(self, context, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.
        """
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        if cms.is_ans1_token(token_id):
            # PKI token: verify the CMS signature and decode the payload.
            data = json.loads(cms.cms_verify(cms.token_to_cms(token_id),
                                             config.CONF.signing.certfile,
                                             config.CONF.signing.ca_certs))
            data['access']['token']['user'] = data['access']['user']
            data['access']['token']['metadata'] = data['access']['metadata']
            if belongs_to:
                # BUG FIX: was a bare `assert`, which is stripped under
                # `python -O` (scope check silently skipped) and otherwise
                # surfaces as a 500 AssertionError instead of a 401.
                if data['access']['token']['tenant']['id'] != belongs_to:
                    raise exception.Unauthorized(
                        'Token does not belong to specified tenant.')
            token_ref = data['access']['token']
        else:
            token_ref = self.token_api.get_token(context=context,
                                                 token_id=token_id)
        return token_ref

    # admin only
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Identical to ``validate_token``, except does not return a response.
        """
        belongs_to = context['query_string'].get('belongsTo')
        # _get_token_ref raises on any failure; the assert only guards a
        # falsy-but-non-raising return.  NOTE(review): this assert is
        # stripped under -O — confirm _get_token_ref can never return falsy.
        assert self._get_token_ref(context, token_id, belongs_to)

    # admin only
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Returns metadata about the token along any associated roles.
        """
        belongs_to = context['query_string'].get('belongsTo')
        token_ref = self._get_token_ref(context, token_id, belongs_to)

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        metadata_ref = token_ref['metadata']
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # Get a service catalog if possible
        # This is needed for on-behalf-of requests
        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=metadata_ref)
        return self._format_token(token_ref, roles_ref, catalog_ref)

    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_api.delete_token(context=context, token_id=token_id)

    def revocation_list(self, context, auth=None):
        """Return a CMS-signed list of revoked tokens (admin only)."""
        self.assert_admin(context)
        tokens = self.token_api.list_revoked_tokens(context)

        for t in tokens:
            expires = t['expires']
            # Normalize datetime objects to ISO strings for serialization.
            if not (expires and isinstance(expires, unicode)):
                t['expires'] = timeutils.isotime(expires)
        data = {'revoked': tokens}
        json_data = json.dumps(data)
        signed_text = cms.cms_sign_text(json_data,
                                        config.CONF.signing.certfile,
                                        config.CONF.signing.keyfile)
        return {'signed': signed_text}

    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        self.assert_admin(context)

        token_ref = self._get_token_ref(context, token_id)

        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=token_ref['metadata'])

        return self._format_endpoint_list(catalog_ref)

    def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
        """Format a full authenticate response: token plus catalog."""
        o = self._format_token(token_ref, roles_ref)
        o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o

    def _format_token(self, token_ref, roles_ref, catalog_ref=None):
        """Build the v2.0 'access' response document for a token."""
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        expires = token_ref['expires']
        if expires is not None:
            if not isinstance(expires, unicode):
                expires = timeutils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  'issued_at': timeutils.strtime()
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get(
                                     'roles_links', [])
                                 }
                        }
             }
        if 'tenant' in token_ref and token_ref['tenant']:
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        if metadata_ref:
            if 'is_admin' in metadata_ref:
                o['access']['metadata'] = {
                    'is_admin': metadata_ref['is_admin']}
            else:
                o['access']['metadata'] = {'is_admin': 0}
            if 'roles' in metadata_ref:
                o['access']['metadata']['roles'] = metadata_ref['roles']
        return o

    def _format_catalog(self, catalog_ref):
        """Munge catalogs from internal to output format

        Internal catalogs look like::

            {$REGION: {
                {$SERVICE: {
                    $key1: $value1,
                    ...
                    }
                }
            }

        The legacy api wants them to look like::

            [{'name': $SERVICE[name],
              'type': $SERVICE,
              'endpoints': [{
                  'tenantId': $tenant_id,
                  ...
                  'region': $REGION,
                  }],
              'endpoints_links': [],
             }]

        """
        if not catalog_ref:
            return {}

        services = {}
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                service_ref['region'] = region

                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)

                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref

        return services.values()

    def _format_endpoint_list(self, catalog_ref):
        """Formats a list of endpoints according to Identity API v2.

        The v2.0 API wants an endpoint list to look like::

            {
                'endpoints': [
                    {
                        'id': $endpoint_id,
                        'name': $SERVICE[name],
                        'type': $SERVICE,
                        'tenantId': $tenant_id,
                        'region': $REGION,
                    }
                ],
                'endpoints_links': [],
            }

        """
        if not catalog_ref:
            return {}

        endpoints = []
        for region_name, region_ref in catalog_ref.iteritems():
            for service_type, service_ref in region_ref.iteritems():
                endpoints.append({
                    'id': service_ref.get('id'),
                    'name': service_ref.get('name'),
                    'type': service_type,
                    'region': region_name,
                    'publicURL': service_ref.get('publicURL'),
                    'internalURL': service_ref.get('internalURL'),
                    'adminURL': service_ref.get('adminURL'),
                })

        return {'endpoints': endpoints,
                'endpoints_links': []}


class ExtensionsController(wsgi.Application):
    """Base extensions controller to be extended by public and admin API's."""

    def __init__(self, extensions=None):
        super(ExtensionsController, self).__init__()

        self.extensions = extensions or {}

    def get_extensions_info(self, context):
        return {'extensions': {'values': self.extensions.values()}}

    def get_extension_info(self, context, extension_alias):
        try:
            return {'extension': self.extensions[extension_alias]}
        except KeyError:
            raise exception.NotFound(target=extension_alias)


class PublicExtensionsController(ExtensionsController):
    pass


class AdminExtensionsController(ExtensionsController):
    def __init__(self, *args, **kwargs):
        super(AdminExtensionsController, self).__init__(*args, **kwargs)

        # TODO(dolph): Extensions should obviously provide this information
        #              themselves, but hardcoding it here allows us to match
        #              the API spec in the short term with minimal complexity.
        self.extensions['OS-KSADM'] = {
            'name': 'Openstack Keystone Admin',
            'namespace': 'http://docs.openstack.org/identity/api/ext/'
                         'OS-KSADM/v1.0',
            'alias': 'OS-KSADM',
            'updated': '2011-08-19T13:25:27-06:00',
            'description': 'Openstack extensions to Keystone v2.0 API '
                           'enabling Admin Operations.',
            'links': [
                {
                    'rel': 'describedby',
                    # TODO(dolph): link needs to be revised after
                    #              bug 928059 merges
                    'type': 'text/html',
                    'href': 'https://github.com/openstack/identity-api',
                }
            ]
        }


@logging.fail_gracefully
def public_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return PublicRouter()


@logging.fail_gracefully
def admin_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return AdminRouter()


@logging.fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return PublicVersionRouter()


@logging.fail_gracefully
def admin_version_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return AdminVersionRouter()


@logging.fail_gracefully
def v3_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return V3Router()
./CrossVul/dataset_final_sorted/CWE-255/py/bad_3788_0
crossvul-python_data_good_3791_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the EC2 Credentials service.

This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.

A user can create as many access/secret pairs, each of which map to a
specific tenant.  This is required because OpenStack supports a user
belonging to multiple tenants, whereas the signatures created on ec2-style
requests don't allow specification of which tenant the user wishs to act
upon.

To complete the cycle, we provide a method that OpenStack services can
use to validate a signature and get a corresponding openstack token.  This
token allows method calls to other services within the context the
access/secret was created.  As an example, nova requests keystone to validate
the signature of a request, receives a token, and then makes a request to
glance to list images needed to perform the requested task.

"""

import uuid

from keystone import catalog
from keystone.common import manager
from keystone.common import utils
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import service
from keystone import token


CONF = config.CONF


class Manager(manager.Manager):
    """Default pivot point for the EC2 Credentials backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        super(Manager, self).__init__(CONF.ec2.driver)


class Ec2Extension(wsgi.ExtensionRouter):
    """Routes for EC2 token validation and credential CRUD."""

    def add_routes(self, mapper):
        ec2_controller = Ec2Controller()
        # validation
        mapper.connect(
            '/ec2tokens',
            controller=ec2_controller,
            action='authenticate',
            conditions=dict(method=['POST']))

        # crud
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2',
            controller=ec2_controller,
            action='create_credential',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2',
            controller=ec2_controller,
            action='get_credentials',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2/{credential_id}',
            controller=ec2_controller,
            action='get_credential',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2/{credential_id}',
            controller=ec2_controller,
            action='delete_credential',
            conditions=dict(method=['DELETE']))


class Ec2Controller(wsgi.Application):
    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        self.ec2_api = Manager()
        super(Ec2Controller, self).__init__()

    def check_signature(self, creds_ref, credentials):
        """Verify the EC2 signature on a request.

        :param creds_ref: stored credential (provides the secret key)
        :param credentials: signed request parameters, including
                            'signature' and 'host'
        :raises exception.Unauthorized: if the signature does not match
        """
        signer = utils.Ec2Signer(creds_ref['secret'])
        signature = signer.generate(credentials)
        # auth_str_equal is a constant-time comparison; never compare
        # signatures with '=='.
        if utils.auth_str_equal(credentials['signature'], signature):
            return
        # NOTE(vish): Some libraries don't use the port when signing
        #             requests, so try again without port.
        # BUG FIX: the retry condition originally tested
        # ':' in credentials['signature'] — but EC2 signatures are base64
        # and never contain ':', so the retry was dead code and every
        # mismatch fell through to a misleading "not supplied" error.  The
        # intent (per the NOTE above) is to retry when the *host* carries a
        # port.  Also fixed credentials.signature (attribute access on a
        # dict, an AttributeError) to credentials['signature'].
        elif ':' in credentials['host']:
            hostname, _port = credentials['host'].split(':')
            credentials['host'] = hostname
            signature = signer.generate(credentials)
            if not utils.auth_str_equal(credentials['signature'], signature):
                raise exception.Unauthorized(
                    message='Invalid EC2 signature.')
        else:
            raise exception.Unauthorized(message='Invalid EC2 signature.')

    def authenticate(self, context, credentials=None, ec2Credentials=None):
        """Validate a signed EC2 request and provide a token.

        Other services (such as Nova) use this **admin** call to determine
        if a request they signed received is from a valid user.

        If it is a valid signature, an openstack token that maps
        to the user/tenant is returned to the caller, along with
        all the other details returned from a normal token validation
        call.

        The returned token is useful for making calls to other
        OpenStack services within the context of the request.

        :param context: standard context
        :param credentials: dict of ec2 signature
        :param ec2Credentials: DEPRECATED dict of ec2 signature
        :returns: token: openstack token equivalent to access key along
                         with the corresponding service catalog and roles
        """

        # FIXME(ja): validate that a service token was used!

        # NOTE(termie): backwards compat hack
        if not credentials and ec2Credentials:
            credentials = ec2Credentials

        if 'access' not in credentials:
            raise exception.Unauthorized(
                message='EC2 signature not supplied.')

        creds_ref = self._get_credentials(context,
                                          credentials['access'])
        self.check_signature(creds_ref, credentials)

        # TODO(termie): don't create new tokens every time
        # TODO(termie): this is copied from TokenController.authenticate
        token_id = uuid.uuid4().hex
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=creds_ref['tenant_id'])
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=creds_ref['user_id'])
        metadata_ref = self.identity_api.get_metadata(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'])

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        roles = metadata_ref.get('roles', [])
        if not roles:
            raise exception.Unauthorized(
                message='User not valid for tenant.')
        roles_ref = [self.identity_api.get_role(context, role_id)
                     for role_id in roles]

        catalog_ref = self.catalog_api.get_catalog(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'],
            metadata=metadata_ref)

        token_ref = self.token_api.create_token(
            context, token_id, dict(id=token_id,
                                    user=user_ref,
                                    tenant=tenant_ref,
                                    metadata=metadata_ref))

        # TODO(termie): make this a util function or something
        # TODO(termie): i don't think the ec2 middleware currently expects a
        #               full return, but it contains a note saying that it
        #               would be better to expect a full return
        token_controller = service.TokenController()
        return token_controller._format_authenticate(
            token_ref, roles_ref, catalog_ref)

    def create_credential(self, context, user_id, tenant_id):
        """Create a secret/access pair for use with ec2 style auth.

        Generates a new set of credentials that map the the user/tenant
        pair.

        :param context: standard context
        :param user_id: id of user
        :param tenant_id: id of tenant
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)

        self._assert_valid_user_id(context, user_id)
        self._assert_valid_tenant_id(context, tenant_id)

        cred_ref = {'user_id': user_id,
                    'tenant_id': tenant_id,
                    'access': uuid.uuid4().hex,
                    'secret': uuid.uuid4().hex}
        self.ec2_api.create_credential(context, cred_ref['access'], cred_ref)
        return {'credential': cred_ref}

    def get_credentials(self, context, user_id):
        """List all credentials for a user.

        :param context: standard context
        :param user_id: id of user
        :returns: credentials: list of ec2 credential dicts
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        return {'credentials':
                self.ec2_api.list_credentials(context, user_id)}

    def get_credential(self, context, user_id, credential_id):
        """Retreive a user's access/secret pair by the access key.

        Grab the full access/secret pair for a given access key.

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        creds = self._get_credentials(context, credential_id)
        return {'credential': creds}

    def delete_credential(self, context, user_id, credential_id):
        """Delete a user's access/secret pair.

        Used to revoke a user's access/secret pair

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: bool: success
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
            self._assert_owner(context, user_id, credential_id)

        self._assert_valid_user_id(context, user_id)
        self._get_credentials(context, credential_id)
        return self.ec2_api.delete_credential(context, credential_id)

    def _get_credentials(self, context, credential_id):
        """Return credentials from an ID.

        :param context: standard context
        :param credential_id: id of credential
        :raises exception.Unauthorized: when credential id is invalid
        :returns: credential: dict of ec2 credential.
        """
        creds = self.ec2_api.get_credential(context,
                                            credential_id)
        if not creds:
            raise exception.Unauthorized(
                message='EC2 access key not found.')
        return creds

    def _assert_identity(self, context, user_id):
        """Check that the provided token belongs to the user.

        :param context: standard context
        :param user_id: id of user
        :raises exception.Forbidden: when token is invalid

        """
        try:
            token_ref = self.token_api.get_token(
                context=context,
                token_id=context['token_id'])
        except exception.TokenNotFound:
            raise exception.Unauthorized()

        token_user_id = token_ref['user'].get('id')
        if not token_user_id == user_id:
            raise exception.Forbidden()

    def _is_admin(self, context):
        """Wrap admin assertion error return statement.

        :param context: standard context
        :returns: bool: success

        """
        try:
            self.assert_admin(context)
            return True
        except exception.Forbidden:
            return False

    def _assert_owner(self, context, user_id, credential_id):
        """Ensure the provided user owns the credential.

        :param context: standard context
        :param user_id: expected credential owner
        :param credential_id: id of credential object
        :raises exception.Forbidden: on failure

        """
        cred_ref = self.ec2_api.get_credential(context, credential_id)
        if not user_id == cred_ref['user_id']:
            raise exception.Forbidden()

    def _assert_valid_user_id(self, context, user_id):
        """Ensure a valid user id.

        :param context: standard context
        :param user_id: expected credential owner
        :raises exception.UserNotFound: on failure

        """
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)

    def _assert_valid_tenant_id(self, context, tenant_id):
        """Ensure a valid tenant id.

        :param context: standard context
        :param tenant_id: id of the tenant being granted access
        :raises exception.TenantNotFound: on failure

        """
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=tenant_id)
        if not tenant_ref:
            raise exception.TenantNotFound(tenant_id=tenant_id)
crossvul-python_data_good_5024_0
#!/usr/bin/env python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Contains the classes for the global used variables: - Request - Response - Session """ from gluon.storage import Storage, List from gluon.streamer import streamer, stream_file_or_304_or_206, DEFAULT_CHUNK_SIZE from gluon.xmlrpc import handler from gluon.contenttype import contenttype from gluon.html import xmlescape, TABLE, TR, PRE, URL from gluon.http import HTTP, redirect from gluon.fileutils import up from gluon.serializers import json, custom_json import gluon.settings as settings from gluon.utils import web2py_uuid, secure_dumps, secure_loads from gluon.settings import global_settings from gluon import recfile from gluon.cache import CacheInRam from gluon.fileutils import copystream import hashlib import portalocker try: import cPickle as pickle except: import pickle from pickle import Pickler, MARK, DICT, EMPTY_DICT from types import DictionaryType import cStringIO import datetime import re import copy_reg import Cookie import os import sys import traceback import threading import cgi import urlparse import copy import tempfile FMT = '%a, %d-%b-%Y %H:%M:%S PST' PAST = 'Sat, 1-Jan-1971 00:00:00' FUTURE = 'Tue, 1-Dec-2999 23:59:59' try: from gluon.contrib.minify import minify have_minify = True except ImportError: have_minify = False try: import simplejson as sj # external installed library except: try: import json as sj # standard installed library except: import gluon.contrib.simplejson as sj # pure python library regex_session_id = re.compile('^([\w\-]+/)?[\w\-\.]+$') __all__ = ['Request', 'Response', 'Session'] current = threading.local() # thread-local storage for request-scope globals css_template = '<link href="%s" rel="stylesheet" type="text/css" />' js_template = '<script src="%s" type="text/javascript"></script>' coffee_template = 
'<script src="%s" type="text/coffee"></script>' typescript_template = '<script src="%s" type="text/typescript"></script>' less_template = '<link href="%s" rel="stylesheet/less" type="text/css" />' css_inline = '<style type="text/css">\n%s\n</style>' js_inline = '<script type="text/javascript">\n%s\n</script>' template_mapping = { 'css': css_template, 'js': js_template, 'coffee': coffee_template, 'ts': typescript_template, 'less': less_template, 'css:inline': css_inline, 'js:inline': js_inline } # IMPORTANT: # this is required so that pickled dict(s) and class.__dict__ # are sorted and web2py can detect without ambiguity when a session changes class SortingPickler(Pickler): def save_dict(self, obj): self.write(EMPTY_DICT if self.bin else MARK + DICT) self.memoize(obj) self._batch_setitems([(key, obj[key]) for key in sorted(obj)]) SortingPickler.dispatch = copy.copy(Pickler.dispatch) SortingPickler.dispatch[DictionaryType] = SortingPickler.save_dict def sorting_dumps(obj, protocol=None): file = cStringIO.StringIO() SortingPickler(file, protocol).dump(obj) return file.getvalue() # END ##################################################################### def copystream_progress(request, chunk_size=10 ** 5): """ Copies request.env.wsgi_input into request.body and stores progress upload status in cache_ram X-Progress-ID:length and X-Progress-ID:uploaded """ env = request.env if not env.get('CONTENT_LENGTH', None): return cStringIO.StringIO() source = env['wsgi.input'] try: size = int(env['CONTENT_LENGTH']) except ValueError: raise HTTP(400, "Invalid Content-Length header") try: # Android requires this dest = tempfile.NamedTemporaryFile() except NotImplementedError: # and GAE this dest = tempfile.TemporaryFile() if not 'X-Progress-ID' in request.get_vars: copystream(source, dest, size, chunk_size) return dest cache_key = 'X-Progress-ID:' + request.get_vars['X-Progress-ID'] cache_ram = CacheInRam(request) # same as cache.ram because meta_storage cache_ram(cache_key + 
':length', lambda: size, 0) cache_ram(cache_key + ':uploaded', lambda: 0, 0) while size > 0: if size < chunk_size: data = source.read(size) cache_ram.increment(cache_key + ':uploaded', size) else: data = source.read(chunk_size) cache_ram.increment(cache_key + ':uploaded', chunk_size) length = len(data) if length > size: (data, length) = (data[:size], size) size -= length if length == 0: break dest.write(data) if length < chunk_size: break dest.seek(0) cache_ram(cache_key + ':length', None) cache_ram(cache_key + ':uploaded', None) return dest class Request(Storage): """ Defines the request object and the default values of its members - env: environment variables, by gluon.main.wsgibase() - cookies - get_vars - post_vars - vars - folder - application - function - args - extension - now: datetime.datetime.now() - utcnow : datetime.datetime.utcnow() - is_local - is_https - restful() """ def __init__(self, env): Storage.__init__(self) self.env = Storage(env) self.env.web2py_path = global_settings.applications_parent self.cookies = Cookie.SimpleCookie() self._get_vars = None self._post_vars = None self._vars = None self._body = None self.folder = None self.application = None self.function = None self.args = List() self.extension = 'html' self.now = datetime.datetime.now() self.utcnow = datetime.datetime.utcnow() self.is_restful = False self.is_https = False self.is_local = False self._uuid = None def parse_get_vars(self): """Takes the QUERY_STRING and unpacks it to get_vars """ query_string = self.env.get('query_string', '') dget = urlparse.parse_qs(query_string, keep_blank_values=1) # Ref: https://docs.python.org/2/library/cgi.html#cgi.parse_qs get_vars = self._get_vars = Storage(dget) for (key, value) in get_vars.iteritems(): if isinstance(value, list) and len(value) == 1: get_vars[key] = value[0] def parse_post_vars(self): """Takes the body of the request and unpacks it into post_vars. 
        application/json is also automatically parsed
        """
        env = self.env
        post_vars = self._post_vars = Storage()
        body = self.body
        # if content-type is application/json, we must read the body
        is_json = env.get('content_type', '')[:16] == 'application/json'

        if is_json:
            try:
                json_vars = sj.load(body)
            except:
                # incoherent request bodies can still be parsed "ad-hoc"
                json_vars = {}
                pass
            # update vars and get_vars with what was posted as json
            if isinstance(json_vars, dict):
                post_vars.update(json_vars)

        # rewind so a later FieldStorage / raw read sees the full body again
        body.seek(0)

        # parse POST variables on POST, PUT, BOTH only in post_vars
        if (body and not is_json
            and env.request_method in ('POST', 'PUT', 'DELETE', 'BOTH')):
            # FieldStorage would otherwise merge query-string vars into the
            # POST vars; hide QUERY_STRING while parsing and restore it after.
            query_string = env.pop('QUERY_STRING', None)
            dpost = cgi.FieldStorage(fp=body, environ=env, keep_blank_values=1)
            try:
                post_vars.update(dpost)
            except:
                pass
            if query_string is not None:
                env['QUERY_STRING'] = query_string
            # The same detection used by FieldStorage to detect multipart POSTs
            body.seek(0)

            def listify(a):
                # wrap scalars in a one-element list; pass lists through
                return (not isinstance(a, list) and [a]) or a
            try:
                keys = sorted(dpost)
            except TypeError:
                keys = []
            for key in keys:
                if key is None:
                    continue  # not sure why cgi.FieldStorage returns None key
                dpk = dpost[key]
                # if an element is not a file replace it with
                # its value else leave it alone
                pvalue = listify([(_dpk if _dpk.filename else _dpk.value)
                                  for _dpk in dpk]
                                 if isinstance(dpk, list) else
                                 (dpk if dpk.filename else dpk.value))
                if len(pvalue):
                    # single values are stored bare, multiples as a list
                    post_vars[key] = (len(pvalue) > 1 and pvalue) or pvalue[0]

    @property
    def body(self):
        # Lazily buffers wsgi.input into a seekable temp file (with optional
        # X-Progress-ID upload accounting) on first access.
        if self._body is None:
            try:
                self._body = copystream_progress(self)
            except IOError:
                raise HTTP(400, "Bad Request - HTTP body is incomplete")
        return self._body

    def parse_all_vars(self):
        """Merges get_vars and post_vars to vars
        """
        self._vars = copy.copy(self.get_vars)
        # a key present in both GET and POST accumulates all values in a list
        for key, value in self.post_vars.iteritems():
            if key not in self._vars:
                self._vars[key] = value
            else:
                if not isinstance(self._vars[key], list):
                    self._vars[key] = [self._vars[key]]
                self._vars[key] += value if isinstance(value, list)
else [value] @property def get_vars(self): """Lazily parses the query string into get_vars """ if self._get_vars is None: self.parse_get_vars() return self._get_vars @property def post_vars(self): """Lazily parse the body into post_vars """ if self._post_vars is None: self.parse_post_vars() return self._post_vars @property def vars(self): """Lazily parses all get_vars and post_vars to fill vars """ if self._vars is None: self.parse_all_vars() return self._vars @property def uuid(self): """Lazily uuid """ if self._uuid is None: self.compute_uuid() return self._uuid def compute_uuid(self): self._uuid = '%s/%s.%s.%s' % ( self.application, self.client.replace(':', '_'), self.now.strftime('%Y-%m-%d.%H-%M-%S'), web2py_uuid()) return self._uuid def user_agent(self): from gluon.contrib import user_agent_parser session = current.session user_agent = session._user_agent if user_agent: return user_agent user_agent = user_agent_parser.detect(self.env.http_user_agent) for key, value in user_agent.items(): if isinstance(value, dict): user_agent[key] = Storage(value) user_agent = session._user_agent = Storage(user_agent) return user_agent def requires_https(self): """ If request comes in over HTTP, redirects it to HTTPS and secures the session. """ cmd_opts = global_settings.cmd_options # checking if this is called within the scheduler or within the shell # in addition to checking if it's not a cronjob if ((cmd_opts and (cmd_opts.shell or cmd_opts.scheduler)) or global_settings.cronjob or self.is_https): current.session.secure() else: current.session.forget() redirect(URL(scheme='https', args=self.args, vars=self.vars)) def restful(self): def wrapper(action, request=self): def f(_action=action, *a, **b): request.is_restful = True env = request.env is_json = env.content_type=='application/json' method = env.request_method if len(request.args) and '.' 
in request.args[-1]: request.args[-1], _, request.extension = request.args[-1].rpartition('.') current.response.headers['Content-Type'] = \ contenttype('.' + request.extension.lower()) rest_action = _action().get(method, None) if not (rest_action and method == method.upper() and callable(rest_action)): raise HTTP(405, "method not allowed") try: vars = request.vars if method == 'POST' and is_json: body = request.body.read() if len(body): vars = sj.loads(body) res = rest_action(*request.args, **vars) if is_json and not isinstance(res, str): res = json(res) return res except TypeError, e: exc_type, exc_value, exc_traceback = sys.exc_info() if len(traceback.extract_tb(exc_traceback)) == 1: raise HTTP(400, "invalid arguments") else: raise f.__doc__ = action.__doc__ f.__name__ = action.__name__ return f return wrapper class Response(Storage): """ Defines the response object and the default values of its members response.write( ) can be used to write in the output html """ def __init__(self): Storage.__init__(self) self.status = 200 self.headers = dict() self.headers['X-Powered-By'] = 'web2py' self.body = cStringIO.StringIO() self.session_id = None self.cookies = Cookie.SimpleCookie() self.postprocessing = [] self.flash = '' # used by the default view layout self.meta = Storage() # used by web2py_ajax.html self.menu = [] # used by the default view layout self.files = [] # used by web2py_ajax.html self._vars = None self._caller = lambda f: f() self._view_environment = None self._custom_commit = None self._custom_rollback = None self.generic_patterns = ['*'] self.delimiters = ('{{', '}}') self.formstyle = 'table3cols' self.form_label_separator = ': ' def write(self, data, escape=True): if not escape: self.body.write(str(data)) else: self.body.write(xmlescape(data)) def render(self, *a, **b): from compileapp import run_view_in if len(a) > 2: raise SyntaxError( 'Response.render can be called with two arguments, at most') elif len(a) == 2: (view, self._vars) = (a[0], a[1]) 
elif len(a) == 1 and isinstance(a[0], str): (view, self._vars) = (a[0], {}) elif len(a) == 1 and hasattr(a[0], 'read') and callable(a[0].read): (view, self._vars) = (a[0], {}) elif len(a) == 1 and isinstance(a[0], dict): (view, self._vars) = (None, a[0]) else: (view, self._vars) = (None, {}) self._vars.update(b) self._view_environment.update(self._vars) if view: import cStringIO (obody, oview) = (self.body, self.view) (self.body, self.view) = (cStringIO.StringIO(), view) run_view_in(self._view_environment) page = self.body.getvalue() self.body.close() (self.body, self.view) = (obody, oview) else: run_view_in(self._view_environment) page = self.body.getvalue() return page def include_meta(self): s = "\n" for meta in (self.meta or {}).iteritems(): k, v = meta if isinstance(v, dict): s += '<meta' + ''.join(' %s="%s"' % (xmlescape(key), xmlescape(v[key])) for key in v) +' />\n' else: s += '<meta name="%s" content="%s" />\n' % (k, xmlescape(v)) self.write(s, escape=False) def include_files(self, extensions=None): """ Includes files (usually in the head). Can minify and cache local files By default, caches in ram for 5 minutes. To change, response.cache_includes = (cache_method, time_expire). Example: (cache.disk, 60) # caches to disk for 1 minute. 
""" files = [] ext_files = [] has_js = has_css = False for item in self.files: if isinstance(item, (list, tuple)): ext_files.append(item) continue if extensions and not item.rpartition('.')[2] in extensions: continue if item in files: continue if item.endswith('.js'): has_js = True if item.endswith('.css'): has_css = True files.append(item) if have_minify and ((self.optimize_css and has_css) or (self.optimize_js and has_js)): # cache for 5 minutes by default key = hashlib.md5(repr(files)).hexdigest() cache = self.cache_includes or (current.cache.ram, 60 * 5) def call_minify(files=files): return minify.minify(files, URL('static', 'temp'), current.request.folder, self.optimize_css, self.optimize_js) if cache: cache_model, time_expire = cache files = cache_model('response.files.minified/' + key, call_minify, time_expire) else: files = call_minify() files.extend(ext_files) s = [] for item in files: if isinstance(item, str): f = item.lower().split('?')[0] ext = f.rpartition('.')[2] # if static_version we need also to check for # static_version_urls. In that case, the _.x.x.x # bit would have already been added by the URL() # function if self.static_version and not self.static_version_urls: item = item.replace( '/static/', '/static/_%s/' % self.static_version, 1) tmpl = template_mapping.get(ext) if tmpl: s.append(tmpl % item) elif isinstance(item, (list, tuple)): f = item[0] tmpl = template_mapping.get(f) if tmpl: s.append(tmpl % item[1]) self.write(''.join(s), escape=False) def stream(self, stream, chunk_size=DEFAULT_CHUNK_SIZE, request=None, attachment=False, filename=None ): """ If in a controller function:: return response.stream(file, 100) the file content will be streamed at 100 bytes at the time Args: stream: filename or read()able content chunk_size(int): Buffer size request: the request object attachment(bool): prepares the correct headers to download the file as an attachment. 
Usually creates a pop-up download window on browsers filename(str): the name for the attachment Note: for using the stream name (filename) with attachments the option must be explicitly set as function parameter (will default to the last request argument otherwise) """ headers = self.headers # for attachment settings and backward compatibility keys = [item.lower() for item in headers] if attachment: if filename is None: attname = "" else: attname = filename headers["Content-Disposition"] = \ 'attachment;filename="%s"' % attname if not request: request = current.request if isinstance(stream, (str, unicode)): stream_file_or_304_or_206(stream, chunk_size=chunk_size, request=request, headers=headers, status=self.status) # ## the following is for backward compatibility if hasattr(stream, 'name'): filename = stream.name if filename and not 'content-type' in keys: headers['Content-Type'] = contenttype(filename) if filename and not 'content-length' in keys: try: headers['Content-Length'] = \ os.path.getsize(filename) except OSError: pass env = request.env # Internet Explorer < 9.0 will not allow downloads over SSL unless caching is enabled if request.is_https and isinstance(env.http_user_agent, str) and \ not re.search(r'Opera', env.http_user_agent) and \ re.search(r'MSIE [5-8][^0-9]', env.http_user_agent): headers['Pragma'] = 'cache' headers['Cache-Control'] = 'private' if request and env.web2py_use_wsgi_file_wrapper: wrapped = env.wsgi_file_wrapper(stream, chunk_size) else: wrapped = streamer(stream, chunk_size=chunk_size) return wrapped def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None): """ Example of usage in controller:: def download(): return response.download(request, db) Downloads from http://..../download/filename """ from pydal.exceptions import NotAuthorizedException, NotFoundException current.session.forget(current.response) if not request.args: raise HTTP(404) name = request.args[-1] items = 
re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
        # filenames must look like "<table>.<field>.<key>.<ext>"
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        try:
            field = db[t][f]
        except AttributeError:
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name, nameonly=True)
        except NotAuthorizedException:
            raise HTTP(403)
        except NotFoundException:
            raise HTTP(404)
        except IOError:
            raise HTTP(404)
        headers = self.headers
        headers['Content-Type'] = contenttype(name)
        if download_filename is None:
            download_filename = filename
        if attachment:
            # NOTE(review): in Python source '\"' is the same string as '"',
            # so this replace is a no-op and double quotes in the filename
            # reach the header unescaped; '\\"' was presumably intended.
            headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)

    def json(self, data, default=None):
        # Serialize data as JSON, setting the Content-Type header unless the
        # controller already chose one.
        if 'Content-Type' not in self.headers:
            self.headers['Content-Type'] = 'application/json'
        return json(data, default=default or custom_json)

    def xmlrpc(self, request, methods):
        """
        assuming::

            def add(a, b):
                return a+b

        if a controller function \"func\"::

            return response.xmlrpc(request, [add])

        the controller will be able to handle xmlrpc requests for
        the add function. Example::

            import xmlrpclib
            connection = xmlrpclib.ServerProxy(
                'http://hostname/app/contr/func')
            print connection.add(3, 4)

        """
        return handler(request, self, methods)

    def toolbar(self):
        # Debug toolbar: collapsible panels for request/response/session and
        # per-connection DB stats, used by the default layout in debug mode.
        from gluon.html import DIV, SCRIPT, BEAUTIFY, TAG, A
        BUTTON = TAG.button
        admin = URL("admin", "default", "design", extension='html',
                    args=current.request.application)
        from gluon.dal import DAL
        dbstats = []
        dbtables = {}
        infos = DAL.get_instances()
        for k, v in infos.iteritems():
            dbstats.append(TABLE(*[TR(PRE(row[0]), '%.2fms' % (row[1]*1000))
                                   for row in v['dbstats']]))
            dbtables[k] = dict(defined=v['dbtables']['defined'] or '[no defined tables]',
                               lazy=v['dbtables']['lazy'] or '[no lazy tables]')
        # unique suffix so several toolbars on one page do not collide
        u = web2py_uuid()
        backtotop = A('Back to top', _href="#totop-%s" % u)
        # Convert lazy request.vars from property to Storage so they
        # will be displayed in the toolbar.
request = copy.copy(current.request) request.update(vars=current.request.vars, get_vars=current.request.get_vars, post_vars=current.request.post_vars) return DIV( BUTTON('design', _onclick="document.location='%s'" % admin), BUTTON('request', _onclick="jQuery('#request-%s').slideToggle()" % u), BUTTON('response', _onclick="jQuery('#response-%s').slideToggle()" % u), BUTTON('session', _onclick="jQuery('#session-%s').slideToggle()" % u), BUTTON('db tables', _onclick="jQuery('#db-tables-%s').slideToggle()" % u), BUTTON('db stats', _onclick="jQuery('#db-stats-%s').slideToggle()" % u), DIV(BEAUTIFY(request), backtotop, _class="w2p-toolbar-hidden", _id="request-%s" % u), DIV(BEAUTIFY(current.session), backtotop, _class="w2p-toolbar-hidden", _id="session-%s" % u), DIV(BEAUTIFY(current.response), backtotop, _class="w2p-toolbar-hidden", _id="response-%s" % u), DIV(BEAUTIFY(dbtables), backtotop, _class="w2p-toolbar-hidden", _id="db-tables-%s" % u), DIV(BEAUTIFY(dbstats), backtotop, _class="w2p-toolbar-hidden", _id="db-stats-%s" % u), SCRIPT("jQuery('.w2p-toolbar-hidden').hide()"), _id="totop-%s" % u ) class Session(Storage): """ Defines the session object and the default values of its members (None) - session_storage_type : 'file', 'db', or 'cookie' - session_cookie_compression_level : - session_cookie_expires : cookie expiration - session_cookie_key : for encrypted sessions in cookies - session_id : a number or None if no session - session_id_name : - session_locked : - session_masterapp : - session_new : a new session obj is being created - session_hash : hash of the pickled loaded session - session_pickled : picked session if session in cookie: - session_data_name : name of the cookie for session data if session in db: - session_db_record_id - session_db_table - session_db_unique_key if session in file: - session_file - session_filename """ def connect(self, request=None, response=None, db=None, tablename='web2py_session', masterapp=None, migrate=True, separate=None, 
check_client=False, cookie_key=None, cookie_expires=None, compression_level=None ): """ Used in models, allows to customize Session handling Args: request: the request object response: the response object db: to store/retrieve sessions in db (a table is created) tablename(str): table name masterapp(str): points to another's app sessions. This enables a "SSO" environment among apps migrate: passed to the underlying db separate: with True, creates a folder with the 2 initials of the session id. Can also be a function, e.g. :: separate=lambda(session_name): session_name[-2:] check_client: if True, sessions can only come from the same ip cookie_key(str): secret for cookie encryption cookie_expires: sets the expiration of the cookie compression_level(int): 0-9, sets zlib compression on the data before the encryption """ from gluon.dal import Field request = request or current.request response = response or current.response masterapp = masterapp or request.application cookies = request.cookies self._unlock(response) response.session_masterapp = masterapp response.session_id_name = 'session_id_%s' % masterapp.lower() response.session_data_name = 'session_data_%s' % masterapp.lower() response.session_cookie_expires = cookie_expires response.session_client = str(request.client).replace(':', '.') response.session_cookie_key = cookie_key response.session_cookie_compression_level = compression_level # check if there is a session_id in cookies try: old_session_id = cookies[response.session_id_name].value except KeyError: old_session_id = None response.session_id = old_session_id # if we are supposed to use cookie based session data if cookie_key: response.session_storage_type = 'cookie' elif db: response.session_storage_type = 'db' else: response.session_storage_type = 'file' # why do we do this? # because connect may be called twice, by web2py and in models. 
# the first time there is no db yet so it should do nothing if (global_settings.db_sessions is True or masterapp in global_settings.db_sessions): return if response.session_storage_type == 'cookie': # check if there is session data in cookies if response.session_data_name in cookies: session_cookie_data = cookies[response.session_data_name].value else: session_cookie_data = None if session_cookie_data: data = secure_loads(session_cookie_data, cookie_key, compression_level=compression_level) if data: self.update(data) response.session_id = True # else if we are supposed to use file based sessions elif response.session_storage_type == 'file': response.session_new = False response.session_file = None # check if the session_id points to a valid sesion filename if response.session_id: if not regex_session_id.match(response.session_id): response.session_id = None else: response.session_filename = \ os.path.join(up(request.folder), masterapp, 'sessions', response.session_id) try: response.session_file = \ recfile.open(response.session_filename, 'rb+') portalocker.lock(response.session_file, portalocker.LOCK_EX) response.session_locked = True self.update(pickle.load(response.session_file)) response.session_file.seek(0) oc = response.session_filename.split('/')[-1].split('-')[0] if check_client and response.session_client != oc: raise Exception("cookie attack") except: response.session_id = None if not response.session_id: uuid = web2py_uuid() response.session_id = '%s-%s' % (response.session_client, uuid) separate = separate and (lambda session_name: session_name[-2:]) if separate: prefix = separate(response.session_id) response.session_id = '%s/%s' % (prefix, response.session_id) response.session_filename = \ os.path.join(up(request.folder), masterapp, 'sessions', response.session_id) response.session_new = True # else the session goes in db elif response.session_storage_type == 'db': if global_settings.db_sessions is not True: global_settings.db_sessions.add(masterapp) # 
if had a session on file alreday, close it (yes, can happen) if response.session_file: self._close(response) # if on GAE tickets go also in DB if settings.global_settings.web2py_runtime_gae: request.tickets_db = db if masterapp == request.application: table_migrate = migrate else: table_migrate = False tname = tablename + '_' + masterapp table = db.get(tname, None) # Field = db.Field if table is None: db.define_table( tname, Field('locked', 'boolean', default=False), Field('client_ip', length=64), Field('created_datetime', 'datetime', default=request.now), Field('modified_datetime', 'datetime'), Field('unique_key', length=64), Field('session_data', 'blob'), migrate=table_migrate, ) table = db[tname] # to allow for lazy table response.session_db_table = table if response.session_id: # Get session data out of the database try: (record_id, unique_key) = response.session_id.split(':') record_id = long(record_id) except (TypeError, ValueError): record_id = None # Select from database if record_id: row = table(record_id, unique_key=unique_key) # Make sure the session data exists in the database if row: # rows[0].update_record(locked=True) # Unpickle the data session_data = pickle.loads(row.session_data) self.update(session_data) response.session_new = False else: record_id = None if record_id: response.session_id = '%s:%s' % (record_id, unique_key) response.session_db_unique_key = unique_key response.session_db_record_id = record_id else: response.session_id = None response.session_new = True # if there is no session id yet, we'll need to create a # new session else: response.session_new = True # set the cookie now if you know the session_id so user can set # cookie attributes in controllers/models # cookie will be reset later # yet cookie may be reset later # Removed comparison between old and new session ids - should send # the cookie all the time if isinstance(response.session_id, str): response.cookies[response.session_id_name] = response.session_id 
response.cookies[response.session_id_name]['path'] = '/' if cookie_expires: response.cookies[response.session_id_name]['expires'] = \ cookie_expires.strftime(FMT) session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL) response.session_hash = hashlib.md5(session_pickled).hexdigest() if self.flash: (response.flash, self.flash) = (self.flash, None) def renew(self, clear_session=False): if clear_session: self.clear() request = current.request response = current.response session = response.session masterapp = response.session_masterapp cookies = request.cookies if response.session_storage_type == 'cookie': return # if the session goes in file if response.session_storage_type == 'file': self._close(response) uuid = web2py_uuid() response.session_id = '%s-%s' % (response.session_client, uuid) separate = (lambda s: s[-2:]) if session and response.session_id[2:3] == "/" else None if separate: prefix = separate(response.session_id) response.session_id = '%s/%s' % \ (prefix, response.session_id) response.session_filename = \ os.path.join(up(request.folder), masterapp, 'sessions', response.session_id) response.session_new = True # else the session goes in db elif response.session_storage_type == 'db': table = response.session_db_table # verify that session_id exists if response.session_file: self._close(response) if response.session_new: return # Get session data out of the database if response.session_id is None: return (record_id, sep, unique_key) = response.session_id.partition(':') if record_id.isdigit() and long(record_id) > 0: new_unique_key = web2py_uuid() row = table(record_id) if row and row.unique_key == unique_key: table._db(table.id == record_id).update(unique_key=new_unique_key) else: record_id = None if record_id: response.session_id = '%s:%s' % (record_id, new_unique_key) response.session_db_record_id = record_id response.session_db_unique_key = new_unique_key else: response.session_new = True def _fixup_before_save(self): response = current.response 
        # --- tail of a cookie-securing method; its `def` line precedes this
        # chunk, so the signature is not visible here. It hardens the session
        # id cookie (HttpOnly / secure flags) on the outgoing response. ---
        rcookies = response.cookies
        scookies = rcookies.get(response.session_id_name)
        if not scookies:
            return
        if self._forget:
            # session is being forgotten: drop the id cookie entirely
            del rcookies[response.session_id_name]
            return
        if self.get('httponly_cookies',True):
            scookies['HttpOnly'] = True
        if self._secure:
            scookies['secure'] = True

    def clear_session_cookies(self):
        """Expire any cookie-stored session payload and remove the session
        id cookie from the outgoing response."""
        request = current.request
        response = current.response
        session = response.session
        masterapp = response.session_masterapp
        cookies = request.cookies
        rcookies = response.cookies
        # if not cookie_key, but session_data_name in cookies
        # expire session_data_name from cookies
        if response.session_data_name in cookies:
            rcookies[response.session_data_name] = 'expired'
            rcookies[response.session_data_name]['path'] = '/'
            rcookies[response.session_data_name]['expires'] = PAST
        if response.session_id_name in rcookies:
            del rcookies[response.session_id_name]

    def save_session_id_cookie(self):
        """Emit the session id cookie on the response; when no cookie key is
        configured, also expire any stale cookie-stored session payload."""
        request = current.request
        response = current.response
        session = response.session
        masterapp = response.session_masterapp
        cookies = request.cookies
        rcookies = response.cookies

        # if not cookie_key, but session_data_name in cookies
        # expire session_data_name from cookies
        if not response.session_cookie_key:
            if response.session_data_name in cookies:
                rcookies[response.session_data_name] = 'expired'
                rcookies[response.session_data_name]['path'] = '/'
                rcookies[response.session_data_name]['expires'] = PAST
        if response.session_id:
            rcookies[response.session_id_name] = response.session_id
            rcookies[response.session_id_name]['path'] = '/'
            expires = response.session_cookie_expires
            if isinstance(expires, datetime.datetime):
                expires = expires.strftime(FMT)
            if expires:
                rcookies[response.session_id_name]['expires'] = expires

    def clear(self):
        """Delete the persisted session data (file or db backend), then empty
        this Storage in place."""
        # see https://github.com/web2py/web2py/issues/735
        response = current.response
        if response.session_storage_type == 'file':
            target = recfile.generate(response.session_filename)
            try:
                self._close(response)
                os.unlink(target)
            except:
                # best-effort removal: the file may already be gone
                pass
        elif response.session_storage_type == 'db':
            table = response.session_db_table
            if response.session_id:
                # session_id format is "<record_id>:<unique_key>"
                (record_id, sep, unique_key) = response.session_id.partition(':')
                if record_id.isdigit() and long(record_id) > 0:
                    table._db(table.id == record_id).delete()
        Storage.clear(self)

    def is_new(self):
        """Return True exactly once: on the request that first creates the
        session (side effect: records the start timestamp)."""
        if self._start_timestamp:
            return False
        else:
            self._start_timestamp = datetime.datetime.today()
            return True

    def is_expired(self, seconds=3600):
        """Return True if more than `seconds` elapsed since the last request
        that touched this session; otherwise refresh the timestamp."""
        now = datetime.datetime.today()
        if not self._last_timestamp or \
                self._last_timestamp + datetime.timedelta(seconds=seconds) > now:
            self._last_timestamp = now
            return False
        else:
            return True

    def secure(self):
        """Mark the session cookie to be sent with the `secure` flag."""
        self._secure = True

    def forget(self, response=None):
        """Close any open session file and mark the session so it will not be
        saved at the end of this request."""
        self._close(response)
        self._forget = True

    def _try_store_in_cookie(self, request, response):
        # Serialize the whole session into a signed/encrypted cookie.
        # Returns False (and only refreshes the id cookie) when there is
        # nothing to save.
        if self._forget or self._unchanged(response):
            # self.clear_session_cookies()
            self.save_session_id_cookie()
            return False
        name = response.session_data_name
        compression_level = response.session_cookie_compression_level
        value = secure_dumps(dict(self),
                             response.session_cookie_key,
                             compression_level=compression_level)
        rcookies = response.cookies
        rcookies.pop(name, None)
        rcookies[name] = value
        rcookies[name]['path'] = '/'
        expires = response.session_cookie_expires
        if isinstance(expires, datetime.datetime):
            expires = expires.strftime(FMT)
        if expires:
            rcookies[name]['expires'] = expires
        return True

    def _unchanged(self, response):
        # Hash the pickled session and compare with the hash captured when
        # the session was loaded; also caches the pickle on the response so
        # the store methods do not pickle twice. md5 here is only a change
        # detector, not a security primitive.
        session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
        response.session_pickled = session_pickled
        session_hash = hashlib.md5(session_pickled).hexdigest()
        return response.session_hash == session_hash

    def _try_store_in_db(self, request, response):
        # Persist the session in the configured db table; returns False when
        # saving is not applicable or nothing changed.
        # don't save if file-based sessions,
        # no session id, or session being forgotten
        # or no changes to session (Unless the session is new)
        if (not response.session_db_table
                or self._forget
                or (self._unchanged(response) and not response.session_new)):
            if (not response.session_db_table
                    and global_settings.db_sessions is not True
                    and response.session_masterapp in global_settings.db_sessions):
                global_settings.db_sessions.remove(response.session_masterapp)
            # self.clear_session_cookies()
            self.save_session_id_cookie()
            return False

        table = response.session_db_table
        record_id = response.session_db_record_id
        if response.session_new:
            # fresh random key; becomes part of the session id below
            unique_key = web2py_uuid()
        else:
            unique_key = response.session_db_unique_key

        session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)

        dd = dict(locked=False,
                  client_ip=response.session_client,
                  modified_datetime=request.now,
                  session_data=session_pickled,
                  unique_key=unique_key)
        if record_id:
            if not table._db(table.id == record_id).update(**dd):
                # record disappeared (e.g. concurrent clear): re-insert
                record_id = None
        if not record_id:
            record_id = table.insert(**dd)
        response.session_id = '%s:%s' % (record_id, unique_key)
        response.session_db_unique_key = unique_key
        response.session_db_record_id = record_id

        self.save_session_id_cookie()
        return True

    def _try_store_in_cookie_or_file(self, request, response):
        # Dispatch to the storage backend chosen at connect time.
        if response.session_storage_type == 'file':
            return self._try_store_in_file(request, response)
        if response.session_storage_type == 'cookie':
            return self._try_store_in_cookie(request, response)

    def _try_store_in_file(self, request, response):
        # Persist the pickled session to its locked file; always releases the
        # lock/file handle via the finally clause.
        try:
            if (not response.session_id or self._forget
                    or self._unchanged(response)):
                # self.clear_session_cookies()
                self.save_session_id_cookie()
                return False
            if response.session_new or not response.session_file:
                # Tests if the session sub-folder exists, if not, create it
                session_folder = os.path.dirname(response.session_filename)
                if not os.path.exists(session_folder):
                    os.mkdir(session_folder)
                response.session_file = recfile.open(response.session_filename, 'wb')
                portalocker.lock(response.session_file, portalocker.LOCK_EX)
                response.session_locked = True
            if response.session_file:
                session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
                response.session_file.write(session_pickled)
                # drop any leftover bytes from a previously longer session
                response.session_file.truncate()
        finally:
            self._close(response)
        self.save_session_id_cookie()
        return True

    def _unlock(self, response):
        # Release the advisory file lock if we hold one.
        if response and response.session_file and response.session_locked:
            try:
                portalocker.unlock(response.session_file)
                response.session_locked = False
            except:
                # this should never happen but happens in Windows
                pass

    def _close(self, response):
        # Unlock and close the session file handle, ignoring errors.
        if response and response.session_file:
            self._unlock(response)
            try:
                response.session_file.close()
                del response.session_file
            except:
                pass


def pickle_session(s):
    # Reduce helper: pickle Session instances as (class, plain-dict payload).
    return Session, (dict(s),)

copy_reg.pickle(Session, pickle_session)
./CrossVul/dataset_final_sorted/CWE-255/py/good_5024_0
crossvul-python_data_bad_741_1
"""Parse (absolute and relative) URLs. urlparse module is based upon the following RFC specifications. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding and L. Masinter, January 2005. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter and L.Masinter, December 1999. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. Berners-Lee, R. Fielding, and L. Masinter, August 1998. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 1995. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. McCahill, December 1994 RFC 3986 is considered the current standard and any future changes to urlparse module should conform with it. The urlparse module is currently not entirely compliant with this RFC due to defacto scenarios for parsing, and for backward compatibility purposes, some parsing quirks from older RFCs are retained. The testcases in test_urlparse.py provides a good indicator of parsing behavior. """ import re import sys import collections __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", "urlsplit", "urlunsplit", "urlencode", "parse_qs", "parse_qsl", "quote", "quote_plus", "quote_from_bytes", "unquote", "unquote_plus", "unquote_to_bytes", "DefragResult", "ParseResult", "SplitResult", "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] # A classification of schemes. # The empty string classifies URLs with no scheme specified, # being the default value returned by “urlsplit” and “urlparse”. 
uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', 'wais', 'file', 'https', 'shttp', 'mms', 'prospero', 'rtsp', 'rtspu', 'sftp', 'svn', 'svn+ssh', 'ws', 'wss'] uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', 'ws', 'wss'] uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 'mms', 'sftp', 'tel'] # These are not actually used anymore, but should stay for backwards # compatibility. (They are undocumented, but have a public-looking name.) non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', 'gopher', 'rtsp', 'rtspu', 'sip', 'sips'] uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais', 'https', 'shttp', 'snews', 'file', 'prospero'] # Characters valid in scheme names scheme_chars = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789' '+-.') # XXX: Consider replacing with functools.lru_cache MAX_CACHE_SIZE = 20 _parse_cache = {} def clear_cache(): """Clear the parse cache and the quoters cache.""" _parse_cache.clear() _safe_quoters.clear() # Helpers for bytes handling # For 3.2, we deliberately require applications that # handle improperly quoted URLs to do their own # decoding and encoding. 
If valid use cases are # presented, we may relax this by using latin-1 # decoding internally for 3.3 _implicit_encoding = 'ascii' _implicit_errors = 'strict' def _noop(obj): return obj def _encode_result(obj, encoding=_implicit_encoding, errors=_implicit_errors): return obj.encode(encoding, errors) def _decode_args(args, encoding=_implicit_encoding, errors=_implicit_errors): return tuple(x.decode(encoding, errors) if x else '' for x in args) def _coerce_args(*args): # Invokes decode if necessary to create str args # and returns the coerced inputs along with # an appropriate result coercion function # - noop for str inputs # - encoding function otherwise str_input = isinstance(args[0], str) for arg in args[1:]: # We special-case the empty string to support the # "scheme=''" default argument to some functions if arg and isinstance(arg, str) != str_input: raise TypeError("Cannot mix str and non-str arguments") if str_input: return args + (_noop,) return _decode_args(args) + (_encode_result,) # Result objects are more helpful than simple tuples class _ResultMixinStr(object): """Standard approach to encoding parsed results from str to bytes""" __slots__ = () def encode(self, encoding='ascii', errors='strict'): return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) class _ResultMixinBytes(object): """Standard approach to decoding parsed results from bytes to str""" __slots__ = () def decode(self, encoding='ascii', errors='strict'): return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) class _NetlocResultMixinBase(object): """Shared methods for the parsed result objects containing a netloc element""" __slots__ = () @property def username(self): return self._userinfo[0] @property def password(self): return self._userinfo[1] @property def hostname(self): hostname = self._hostinfo[0] if not hostname: return None # Scoped IPv6 address may have zone info, which must not be lowercased # like 
http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys separator = '%' if isinstance(hostname, str) else b'%' hostname, percent, zone = hostname.partition(separator) return hostname.lower() + percent + zone @property def port(self): port = self._hostinfo[1] if port is not None: port = int(port, 10) if not ( 0 <= port <= 65535): raise ValueError("Port out of range 0-65535") return port class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition('@') if have_info: username, have_password, password = userinfo.partition(':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition('@') _, have_open_br, bracketed = hostinfo.partition('[') if have_open_br: hostname, _, port = bracketed.partition(']') _, _, port = port.partition(':') else: hostname, _, port = hostinfo.partition(':') if not port: port = None return hostname, port class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition(b'@') if have_info: username, have_password, password = userinfo.partition(b':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition(b'@') _, have_open_br, bracketed = hostinfo.partition(b'[') if have_open_br: hostname, _, port = bracketed.partition(b']') _, _, port = port.partition(b':') else: hostname, _, port = hostinfo.partition(b':') if not port: port = None return hostname, port from collections import namedtuple _DefragResultBase = namedtuple('DefragResult', 'url fragment') _SplitResultBase = namedtuple( 'SplitResult', 'scheme netloc path query fragment') _ParseResultBase = 
namedtuple( 'ParseResult', 'scheme netloc path params query fragment') _DefragResultBase.__doc__ = """ DefragResult(url, fragment) A 2-tuple that contains the url without fragment identifier and the fragment identifier as a separate argument. """ _DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" _DefragResultBase.fragment.__doc__ = """ Fragment identifier separated from URL, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _SplitResultBase.__doc__ = """ SplitResult(scheme, netloc, path, query, fragment) A 5-tuple that contains the different components of a URL. Similar to ParseResult, but does not split params. """ _SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" _SplitResultBase.netloc.__doc__ = """ Network location where the request is made to. """ _SplitResultBase.path.__doc__ = """ The hierarchical path, such as the path to a file to download. """ _SplitResultBase.query.__doc__ = """ The query component, that contains non-hierarchical data, that along with data in path component, identifies a resource in the scope of URI's scheme and network location. """ _SplitResultBase.fragment.__doc__ = """ Fragment identifier, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _ParseResultBase.__doc__ = """ ParseResult(scheme, netloc, path, params, query, fragment) A 6-tuple that contains components of a parsed URL. """ _ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ _ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ _ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ _ParseResultBase.params.__doc__ = """ Parameters for last path element used to dereference the URI in order to provide access to perform some operation on the resource. 
""" _ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ _ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ # For backwards compatibility, alias _NetlocResultMixinStr # ResultBase is no longer part of the documented API, but it is # retained since deprecating it isn't worth the hassle ResultBase = _NetlocResultMixinStr # Structured result objects for string data class DefragResult(_DefragResultBase, _ResultMixinStr): __slots__ = () def geturl(self): if self.fragment: return self.url + '#' + self.fragment else: return self.url class SplitResult(_SplitResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResult(_ParseResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunparse(self) # Structured result objects for bytes data class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): __slots__ = () def geturl(self): if self.fragment: return self.url + b'#' + self.fragment else: return self.url class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunparse(self) # Set up the encode/decode result pairs def _fix_result_transcoding(): _result_pairs = ( (DefragResult, DefragResultBytes), (SplitResult, SplitResultBytes), (ParseResult, ParseResultBytes), ) for _decoded, _encoded in _result_pairs: _decoded._encoded_counterpart = _encoded _encoded._decoded_counterpart = _decoded _fix_result_transcoding() del _fix_result_transcoding def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) splitresult = urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = splitresult if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' result = ParseResult(scheme, netloc, url, params, query, fragment) return _coerce_result(result) def _splitparams(url): if '/' in url: i = url.find(';', url.rfind('/')) if i < 0: return url, '' else: i = url.find(';') return url[:i], url[i+1:] def _splitnetloc(url, start=0): delim = len(url) # position of end of domain part of url, default is end for c in '/?#': # look for delimiters; the order is NOT important wdelim = url.find(c, start) # find first of this delim if wdelim >= 0: # if found delim = min(delim, wdelim) # use earliest delim position return url[start:delim], url[delim:] # return (domain, rest) def _checknetloc(netloc): if not netloc or netloc.isascii(): return # looking for characters like \u2100 that expand to 'a/c' # IDNA uses NFKC equivalence, so normalize for this check import unicodedata n = netloc.rpartition('@')[2] # ignore anything to the left of '@' n = n.replace(':', '') # ignore characters already included n = n.replace('#', '') # but not the surrounding text n = n.replace('?', '') netloc2 = unicodedata.normalize('NFKC', n) if n == netloc2: return for c in '/?#@:': if c in netloc2: raise ValueError("netloc '" + netloc + "' contains invalid " + "characters under NFKC normalization") def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> Return a 5-tuple: (scheme, netloc, path, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) allow_fragments = bool(allow_fragments) key = url, scheme, allow_fragments, type(url), type(scheme) cached = _parse_cache.get(key, None) if cached: return _coerce_result(cached) if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case url = url[i+1:] if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) _checknetloc(netloc) v = SplitResult('http', netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) for c in url[:i]: if c not in scheme_chars: break else: # make sure "url" is not actually a port number (in which case # "scheme" is really part of the path) rest = url[i+1:] if not rest or any(c not in '0123456789' for c in rest): # not a port number scheme, url = url[:i].lower(), rest if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) _checknetloc(netloc) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) def urlunparse(components): """Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? 
with an empty query (the draft states that these are equivalent).""" scheme, netloc, url, params, query, fragment, _coerce_result = ( _coerce_args(*components)) if params: url = "%s;%s" % (url, params) return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) def urlunsplit(components): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" scheme, netloc, url, query, fragment, _coerce_result = ( _coerce_args(*components)) if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if query: url = url + '?' + query if fragment: url = url + '#' + fragment return _coerce_result(url) def urljoin(base, url, allow_fragments=True): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url if not url: return base base, url, _coerce_result = _coerce_args(base, url) bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return _coerce_result(url) if scheme in uses_netloc: if netloc: return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) netloc = bnetloc if not path and not params: path = bpath params = bparams if not query: query = bquery return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) base_parts = bpath.split('/') if base_parts[-1] != '': # the last item is not a directory, so will not be taken into account # in resolving the relative path 
        del base_parts[-1]

    # for rfc3986, ignore all base path should the first character be root.
    if path[:1] == '/':
        segments = path.split('/')
    else:
        segments = base_parts + path.split('/')
        # filter out elements that would cause redundant slashes on re-joining
        # the resolved_path
        segments[1:-1] = filter(None, segments[1:-1])

    resolved_path = []

    for seg in segments:
        if seg == '..':
            try:
                resolved_path.pop()
            except IndexError:
                # ignore any .. segments that would otherwise cause an
                # IndexError when popped from resolved_path if
                # resolving for rfc3986
                pass
        elif seg == '.':
            continue
        else:
            resolved_path.append(seg)

    if segments[-1] in ('.', '..'):
        # do some post-processing here. if the last segment was a relative dir,
        # then we need to append the trailing '/'
        resolved_path.append('')

    return _coerce_result(urlunparse((scheme, netloc, '/'.join(
        resolved_path) or '/', params, query, fragment)))


def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
    else:
        frag = ''
        defrag = url
    return _coerce_result(DefragResult(defrag, frag))

_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = None  # lazily-built %XX -> byte table (see unquote_to_bytes)

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Is it a string-like object?
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    bits = string.split(b'%')
    if len(bits) == 1:
        return string
    res = [bits[0]]
    append = res.append
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    global _hextobyte
    if _hextobyte is None:
        _hextobyte = {(a + b).encode(): bytes.fromhex(a + b)
                      for a in _hexdig for b in _hexdig}
    for item in bits[1:]:
        try:
            append(_hextobyte[item[:2]])
            append(item[2:])
        except KeyError:
            # malformed escape: keep the '%' literally
            append(b'%')
            append(item)
    return b''.join(res)

_asciire = re.compile('([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)

def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace', max_num_fields=None):
    """Parse a query given as a string argument.

        Arguments:

        qs: percent-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            percent-encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.

        encoding and errors: specify how to decode percent-encoded sequences
            into Unicode characters, as accepted by the bytes.decode() method.

        max_num_fields: int. If set, then throws a ValueError if there
            are more than n fields read by parse_qsl().

        Returns a dictionary.
    """
    parsed_result = {}
    pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                      encoding=encoding, errors=errors,
                      max_num_fields=max_num_fields)
    for name, value in pairs:
        if name in parsed_result:
            parsed_result[name].append(value)
        else:
            parsed_result[name] = [value]
    return parsed_result

def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace', max_num_fields=None):
    """Parse a query given as a string argument.

        Arguments:

        qs: percent-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            percent-encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as blank
            strings.  The default false value indicates that blank values
            are to be ignored and treated as if they were not included.

        strict_parsing: flag indicating what to do with parsing errors. If
            false (the default), errors are silently ignored. If true,
            errors raise a ValueError exception.

        encoding and errors: specify how to decode percent-encoded sequences
            into Unicode characters, as accepted by the bytes.decode() method.

        max_num_fields: int. If set, then throws a ValueError if there
            are more than n fields read by parse_qsl().

        Returns a list, as G-d intended.
    """
    qs, _coerce_result = _coerce_args(qs)

    # If max_num_fields is defined then check that the number of fields
    # is less than max_num_fields. This prevents a memory exhaustion DOS
    # attack via post bodies with many fields.
    if max_num_fields is not None:
        num_fields = 1 + qs.count('&') + qs.count(';')
        if max_num_fields < num_fields:
            raise ValueError('Max number of fields exceeded')

    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            name = _coerce_result(name)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            value = _coerce_result(value)
            r.append((name, value))
    return r

def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    string = string.replace('+', ' ')
    return unquote(string, encoding, errors)

_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-~')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}  # cache of Quoter.__getitem__ bound methods, keyed by safe set

class Quoter(collections.defaultdict):
    """A mapping from bytes (in range(0,256)) to strings.

    String values are percent-encoded byte values, unless the key < 128, and
    in the "safe" set (either the specified safe set, or default set).
    """
    # Keeps a cache internally, using defaultdict, for efficiency (lookups
    # of cached keys don't call Python code at all).
    def __init__(self, safe):
        """safe: bytes object."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        # Without this, will just display as a defaultdict
        return "<%s %r>" % (self.__class__.__name__, dict(self))

    def __missing__(self, b):
        # Handle a cache miss. Store quoted string in cache and return.
        res = chr(b) if b in self.safe else '%{:02X}'.format(b)
        self[b] = res
        return res

def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted. The
    quote function offers a cautious (not minimal) way to quote a
    string for most of these parts.

    RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists
    the following (un)reserved characters.

    unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
    reserved      = gen-delims / sub-delims
    gen-delims    = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
                  / "*" / "+" / "," / ";" / "="

    Each of the reserved characters is reserved in some component of a URL,
    but not necessarily in all of them.

    The quote function %-escapes all characters that are neither in the
    unreserved chars ("always safe") nor the additional chars set via the
    safe arg.

    The default for the safe arg is '/'. The character is reserved, but in
    typical usage the quote function is being called on a path where the
    existing slash characters are to be preserved.

    Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings.
    Now, "~" is included in the set of unreserved characters.

    string and safe may be either str or bytes objects. encoding and errors
    must not be specified if string is a bytes object.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    else:
        if encoding is not None:
            raise TypeError("quote() doesn't support 'encoding' for bytes")
        if errors is not None:
            raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)

def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values. Plus signs in the original string are escaped unless
    they are included in safe. It also does not have safe default to '/'.
    """
    # Check if ' ' in string, where string may either be a str or bytes.  If
    # there are no spaces, the regular quote will produce the right answer.
    if ((isinstance(string, str) and ' ' not in string) or
        (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    if isinstance(safe, str):
        space = ' '
    else:
        space = b' '
    string = quote(string, safe + space, encoding, errors)
    return string.replace(' ', '+')

def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes([c for c in safe if c < 128])
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        # every byte is safe: no quoting needed at all
        return bs.decode()
    try:
        quoter = _safe_quoters[safe]
    except KeyError:
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return ''.join([quoter(char) for char in bs])

def urlencode(query, doseq=False, safe='', encoding=None, errors=None,
              quote_via=quote_plus):
    """Encode a dict or sequence of two-element tuples into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The components of a query arg may each be either a string or a bytes type.

    The safe, encoding, and errors parameters are passed down to the function
    specified by quote_via (encoding and errors only if a component is a str).
    """

    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty, va, tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence "
                            "or mapping object").with_traceback(tb)

    l = []
    if not doseq:
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_via(v, safe)
            else:
                v = quote_via(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_via(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_via(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_via(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_via(elt, safe)
                        else:
                            elt = quote_via(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)

def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
# XXX get rid of to_bytes() if isinstance(url, str): try: url = url.encode("ASCII").decode() except UnicodeError: raise UnicodeError("URL " + repr(url) + " contains non-ASCII characters") return url def unwrap(url): """unwrap('<URL:type://host/path>') --> 'type://host/path'.""" url = str(url).strip() if url[:1] == '<' and url[-1:] == '>': url = url[1:-1].strip() if url[:4] == 'URL:': url = url[4:].strip() return url _typeprog = None def splittype(url): """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" global _typeprog if _typeprog is None: _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) match = _typeprog.match(url) if match: scheme, data = match.groups() return scheme.lower(), data return None, url _hostprog = None def splithost(url): """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" global _hostprog if _hostprog is None: _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL) match = _hostprog.match(url) if match: host_port, path = match.groups() if path and path[0] != '/': path = '/' + path return host_port, path return None, url def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" user, delim, host = host.rpartition('@') return (user if delim else None), host def splitpasswd(user): """splitpasswd('user:passwd') -> 'user', 'passwd'.""" user, delim, passwd = user.partition(':') return user, (passwd if delim else None) # splittag('/path#tag') --> '/path', 'tag' _portprog = None def splitport(host): """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: _portprog = re.compile('(.*):([0-9]*)$', re.DOTALL) match = _portprog.match(host) if match: host, port = match.groups() if port: return host, port return host, None def splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number are found after ':'. 
Return None if ':' but not a valid number.""" host, delim, port = host.rpartition(':') if not delim: host = port elif port: try: nport = int(port) except ValueError: nport = None return host, nport return host, defport def splitquery(url): """splitquery('/path?query') --> '/path', 'query'.""" path, delim, query = url.rpartition('?') if delim: return path, query return url, None def splittag(url): """splittag('/path#tag') --> '/path', 'tag'.""" path, delim, tag = url.rpartition('#') if delim: return path, tag return url, None def splitattr(url): """splitattr('/path;attr1=value1;attr2=value2;...') -> '/path', ['attr1=value1', 'attr2=value2', ...].""" words = url.split(';') return words[0], words[1:] def splitvalue(attr): """splitvalue('attr=value') --> 'attr', 'value'.""" attr, delim, value = attr.partition('=') return attr, (value if delim else None)
./CrossVul/dataset_final_sorted/CWE-255/py/bad_741_1
crossvul-python_data_bad_3789_0
class AdminRouter(wsgi.ComposingRouter):
    """Wires up the v2.0 admin API: version, token and extension routes."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations (and the certificates used to verify auth tokens),
        # registered in the same order as before, one route per tuple.
        auth_controller = TokenController()
        for path, action, method in (
                ('/tokens', 'authenticate', 'POST'),
                ('/tokens/revoked', 'revocation_list', 'GET'),
                ('/tokens/{token_id}', 'validate_token', 'GET'),
                ('/tokens/{token_id}', 'validate_token_head', 'HEAD'),
                ('/tokens/{token_id}', 'delete_token', 'DELETE'),
                ('/tokens/{token_id}/endpoints', 'endpoints', 'GET'),
                ('/certificates/ca', 'ca_cert', 'GET'),
                ('/certificates/signing', 'signing_cert', 'GET')):
            mapper.connect(path,
                           controller=auth_controller,
                           action=action,
                           conditions={'method': [method]})

        # Miscellaneous Operations
        extensions_controller = AdminExtensionsController()
        for path, action in (
                ('/extensions', 'get_extensions_info'),
                ('/extensions/{extension_alias}', 'get_extension_info')):
            mapper.connect(path,
                           controller=extensions_controller,
                           action=action,
                           conditions={'method': ['GET']})

        super(AdminRouter, self).__init__(mapper, [identity.AdminRouter()])


class PublicRouter(wsgi.ComposingRouter):
    """Wires up the v2.0 public API: version, token and extension routes."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations plus the public certificate endpoints.
        auth_controller = TokenController()
        for path, action, method in (
                ('/tokens', 'authenticate', 'POST'),
                ('/certificates/ca', 'ca_cert', 'GET'),
                ('/certificates/signing', 'signing_cert', 'GET')):
            mapper.connect(path,
                           controller=auth_controller,
                           action=action,
                           conditions={'method': [method]})

        # Miscellaneous
        extensions_controller = PublicExtensionsController()
        for path, action in (
                ('/extensions', 'get_extensions_info'),
                ('/extensions/{extension_alias}', 'get_extension_info')):
            mapper.connect(path,
                           controller=extensions_controller,
                           action=action,
                           conditions={'method': ['GET']})

        super(PublicRouter, self).__init__(mapper, [identity.PublicRouter()])


class PublicVersionRouter(wsgi.ComposingRouter):
    """Routes '/' on the public version endpoint to the version listing."""

    def __init__(self):
        mapper = routes.Mapper()
        mapper.connect('/',
                       controller=VersionController('public'),
                       action='get_versions')
        super(PublicVersionRouter, self).__init__(mapper, [])


class AdminVersionRouter(wsgi.ComposingRouter):
    """Routes '/' on the admin version endpoint to the version listing."""

    def __init__(self):
        mapper = routes.Mapper()
        mapper.connect('/',
                       controller=VersionController('admin'),
                       action='get_versions')
        super(AdminVersionRouter, self).__init__(mapper, [])
class NoopController(wsgi.Application):
    """Controller whose single action does nothing and returns an empty dict."""

    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        return {}


class TokenController(wsgi.Application):
    """v2.0 token operations: issue, validate, revoke and introspect tokens."""

    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()

    def ca_cert(self, context, auth=None):
        """Return the PEM contents of the CA certificate."""
        # 'with' guarantees the handle is closed even if read() raises.
        with open(config.CONF.signing.ca_certs, 'r') as ca_file:
            return ca_file.read()

    def signing_cert(self, context, auth=None):
        """Return the PEM contents of the token-signing certificate."""
        with open(config.CONF.signing.certfile, 'r') as cert_file:
            return cert_file.read()

    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        if 'passwordCredentials' in auth:
            user_id = auth['passwordCredentials'].get('userId', None)
            username = auth['passwordCredentials'].get('username', '')
            password = auth['passwordCredentials'].get('password', '')
            tenant_name = auth.get('tenantName', None)

            if username:
                try:
                    user_ref = self.identity_api.get_user_by_name(
                        context=context, user_name=username)
                    user_id = user_ref['id']
                except exception.UserNotFound:
                    raise exception.Unauthorized()

            # more compat: a tenant may be given by name or by id
            tenant_id = auth.get('tenantId', None)
            if tenant_name:
                try:
                    tenant_ref = self.identity_api.get_tenant_by_name(
                        context=context, tenant_name=tenant_name)
                    tenant_id = tenant_ref['id']
                except exception.TenantNotFound:
                    raise exception.Unauthorized()

            try:
                auth_info = self.identity_api.authenticate(
                    context=context,
                    user_id=user_id,
                    password=password,
                    tenant_id=tenant_id)
                (user_ref, tenant_ref, metadata_ref) = auth_info

                # If the user is disabled don't allow them to authenticate
                if not user_ref.get('enabled', True):
                    LOG.warning('User %s is disabled' % user_id)
                    raise exception.Unauthorized()

                # If the tenant is disabled don't allow them to authenticate
                if tenant_ref and not tenant_ref.get('enabled', True):
                    LOG.warning('Tenant %s is disabled' % tenant_id)
                    raise exception.Unauthorized()
            except AssertionError as e:
                raise exception.Unauthorized(e.message)

            auth_token_data = dict(zip(['user', 'tenant', 'metadata'],
                                       auth_info))

            expiry = self.token_api._get_default_expire_time(context=context)

            if tenant_ref:
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)
            else:
                catalog_ref = {}

            # _format_token reads the authenticated user out of this dict
            current_user_ref = user_ref

        elif 'token' in auth:
            old_token = auth['token'].get('id', None)
            tenant_name = auth.get('tenantName')

            try:
                old_token_ref = self.token_api.get_token(context=context,
                                                         token_id=old_token)
            except exception.NotFound:
                LOG.warning("Token not found: " + str(old_token))
                raise exception.Unauthorized()

            user_ref = old_token_ref['user']
            user_id = user_ref['id']

            current_user_ref = self.identity_api.get_user(context=context,
                                                          user_id=user_id)

            # If the user is disabled don't allow them to authenticate
            if not current_user_ref.get('enabled', True):
                LOG.warning('User %s is disabled' % user_id)
                raise exception.Unauthorized()

            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context, tenant_name=tenant_name)
                tenant_id = tenant_ref['id']
            else:
                tenant_id = auth.get('tenantId', None)

            # Reject a scope request for a tenant the user does not belong to.
            tenants = self.identity_api.get_tenants_for_user(context, user_id)
            if tenant_id:
                if tenant_id not in tenants:
                    # Bug fix: the original message said "is authorized",
                    # but this branch is the *failure* path.
                    LOG.warning('User %s is not authorized for tenant %s'
                                % (user_id, tenant_id))
                    raise exception.Unauthorized()

            expiry = old_token_ref['expires']

            # Defaults up-front so neither name is unbound on any path below.
            metadata_ref = {}
            catalog_ref = {}
            try:
                tenant_ref = self.identity_api.get_tenant(
                    context=context, tenant_id=tenant_id)
            except exception.TenantNotFound:
                tenant_ref = None
            except exception.MetadataNotFound:
                pass

            # If the tenant is disabled don't allow them to authenticate
            if tenant_ref and not tenant_ref.get('enabled', True):
                LOG.warning('Tenant %s is disabled' % tenant_id)
                raise exception.Unauthorized()

            if tenant_ref:
                metadata_ref = self.identity_api.get_metadata(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'])
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)

            auth_token_data = dict(user=current_user_ref,
                                   tenant=tenant_ref,
                                   metadata=metadata_ref)

        auth_token_data['expires'] = expiry
        auth_token_data['id'] = 'placeholder'

        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            role_ref = self.identity_api.get_role(context, role_id)
            roles_ref.append(dict(name=role_ref['name']))

        token_data = self._format_token(auth_token_data, roles_ref)

        service_catalog = self._format_catalog(catalog_ref)
        token_data['access']['serviceCatalog'] = service_catalog

        if config.CONF.signing.token_format == 'UUID':
            token_id = uuid.uuid4().hex
        elif config.CONF.signing.token_format == 'PKI':
            token_id = cms.cms_sign_token(json.dumps(token_data),
                                          config.CONF.signing.certfile,
                                          config.CONF.signing.keyfile)
        else:
            raise exception.UnexpectedError(
                'Invalid value for token_format: %s.'
                ' Allowed values are PKI or UUID.' %
                config.CONF.signing.token_format)

        try:
            self.token_api.create_token(
                context, token_id, dict(key=token_id,
                                        id=token_id,
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))
        except Exception as e:
            # an identical token may have been created already.
            # if so, return the token_data as it is also identical
            try:
                self.token_api.get_token(context=context,
                                         token_id=token_id)
            except exception.TokenNotFound:
                raise e

        token_data['access']['token']['id'] = token_id
        return token_data

    def _get_token_ref(self, context, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.
        """
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        if cms.is_ans1_token(token_id):
            # PKI token: verify the CMS envelope and reconstruct the
            # token-shaped dict that callers of this helper expect.
            data = json.loads(cms.cms_verify(cms.token_to_cms(token_id),
                                             config.CONF.signing.certfile,
                                             config.CONF.signing.ca_certs))
            data['access']['token']['user'] = data['access']['user']
            data['access']['token']['metadata'] = data['access']['metadata']
            if belongs_to:
                assert data['access']['token']['tenant']['id'] == belongs_to
            token_ref = data['access']['token']
        else:
            token_ref = self.token_api.get_token(context=context,
                                                 token_id=token_id)
        return token_ref

    # admin only
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Identical to ``validate_token``, except does not return a response.
        """
        belongs_to = context['query_string'].get('belongsTo')
        assert self._get_token_ref(context, token_id, belongs_to)

    # admin only
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Returns metadata about the token along any associated roles.
        """
        belongs_to = context['query_string'].get('belongsTo')
        token_ref = self._get_token_ref(context, token_id, belongs_to)

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        metadata_ref = token_ref['metadata']
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # Get a service catalog if possible
        # This is needed for on-behalf-of requests
        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=metadata_ref)
        return self._format_token(token_ref, roles_ref, catalog_ref)

    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_api.delete_token(context=context, token_id=token_id)

    def revocation_list(self, context, auth=None):
        """Return the CMS-signed list of revoked tokens (admin only)."""
        self.assert_admin(context)
        tokens = self.token_api.list_revoked_tokens(context)

        for t in tokens:
            expires = t['expires']
            # Normalize datetime expirations to ISO strings for JSON output.
            if not (expires and isinstance(expires, unicode)):
                t['expires'] = timeutils.isotime(expires)

        data = {'revoked': tokens}
        json_data = json.dumps(data)
        signed_text = cms.cms_sign_text(json_data,
                                        config.CONF.signing.certfile,
                                        config.CONF.signing.keyfile)
        return {'signed': signed_text}

    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        self.assert_admin(context)

        token_ref = self._get_token_ref(context, token_id)

        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=token_ref['metadata'])

        return self._format_endpoint_list(catalog_ref)

    def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
        o = self._format_token(token_ref, roles_ref)
        o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o

    def _format_token(self, token_ref, roles_ref, catalog_ref=None):
        """Build the v2.0 'access' response body from an internal token dict."""
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        expires = token_ref['expires']
        if expires is not None:
            if not isinstance(expires, unicode):
                expires = timeutils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get('roles_links',
                                                                 [])
                                 }
                        }
             }
        if 'tenant' in token_ref and token_ref['tenant']:
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        if metadata_ref:
            if 'is_admin' in metadata_ref:
                o['access']['metadata'] = {'is_admin':
                                           metadata_ref['is_admin']}
            else:
                o['access']['metadata'] = {'is_admin': 0}
            if 'roles' in metadata_ref:
                o['access']['metadata']['roles'] = metadata_ref['roles']
        return o

    def _format_catalog(self, catalog_ref):
        """Munge catalogs from internal to output format
        Internal catalogs look like::

          {$REGION: {
              {$SERVICE: {
                  $key1: $value1,
                  ...
                  }
              }
          }

        The legacy api wants them to look like::

          [{'name': $SERVICE[name],
            'type': $SERVICE,
            'endpoints': [{
                'tenantId': $tenant_id,
                ...
                'region': $REGION,
                }],
            'endpoints_links': [],
           }]

        """
        if not catalog_ref:
            return {}

        services = {}
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                service_ref['region'] = region

                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)

                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref

        return services.values()

    def _format_endpoint_list(self, catalog_ref):
        """Formats a list of endpoints according to Identity API v2.

        The v2.0 API wants an endpoint list to look like::

            {
                'endpoints': [
                    {
                        'id': $endpoint_id,
                        'name': $SERVICE[name],
                        'type': $SERVICE,
                        'tenantId': $tenant_id,
                        'region': $REGION,
                    }
                ],
                'endpoints_links': [],
            }

        """
        if not catalog_ref:
            return {}

        endpoints = []
        for region_name, region_ref in catalog_ref.iteritems():
            for service_type, service_ref in region_ref.iteritems():
                endpoints.append({
                    'id': service_ref.get('id'),
                    'name': service_ref.get('name'),
                    'type': service_type,
                    'region': region_name,
                    'publicURL': service_ref.get('publicURL'),
                    'internalURL': service_ref.get('internalURL'),
                    'adminURL': service_ref.get('adminURL'),
                })

        return {'endpoints': endpoints,
                'endpoints_links': []}
AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': 'https://github.com/openstack/identity-api', } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-255/py/bad_3789_0
crossvul-python_data_bad_3793_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the EC2 Credentials service. This service allows the creation of access/secret credentials used for the ec2 interop layer of OpenStack. A user can create as many access/secret pairs, each of which map to a specific tenant. This is required because OpenStack supports a user belonging to multiple tenants, whereas the signatures created on ec2-style requests don't allow specification of which tenant the user wishs to act upon. To complete the cycle, we provide a method that OpenStack services can use to validate a signature and get a corresponding openstack token. This token allows method calls to other services within the context the access/secret was created. As an example, nova requests keystone to validate the signature of a request, receives a token, and then makes a request to glance to list images needed to perform the requested task. """ import uuid from keystone import catalog from keystone.common import manager from keystone.common import utils from keystone.common import wsgi from keystone import config from keystone import exception from keystone import identity from keystone import policy from keystone import service from keystone import token CONF = config.CONF class Manager(manager.Manager): """Default pivot point for the EC2 Credentials backend. 
class Ec2Controller(wsgi.Application):
    """v2.0 EC2 credential operations: validate signed requests, CRUD creds."""

    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        self.ec2_api = Manager()
        super(Ec2Controller, self).__init__()

    def check_signature(self, creds_ref, credentials):
        """Verify the EC2 signature on *credentials* against the stored secret.

        :param creds_ref: stored credential dict (provides the 'secret')
        :param credentials: request dict carrying 'host' and 'signature'
        :raises exception.Unauthorized: if the signature does not match
        """
        signer = utils.Ec2Signer(creds_ref['secret'])
        signature = signer.generate(credentials)
        # auth_str_equal is a constant-time comparison.
        if utils.auth_str_equal(credentials['signature'], signature):
            return
        # NOTE(vish): Some libraries don't use the port when signing
        #             requests, so try again without port.
        # Bug fix: the retry condition must inspect the *host* (for a port
        # suffix), not the signature string; and credentials is a dict, so
        # 'credentials.signature' attribute access would raise AttributeError.
        elif ':' in credentials['host']:
            hostname, _port = credentials['host'].split(':')
            credentials['host'] = hostname
            signature = signer.generate(credentials)
            if not utils.auth_str_equal(credentials['signature'], signature):
                raise exception.Unauthorized(message='Invalid EC2 signature.')
        else:
            raise exception.Unauthorized(
                message='EC2 signature not supplied.')

    def authenticate(self, context, credentials=None, ec2Credentials=None):
        """Validate a signed EC2 request and provide a token.

        Other services (such as Nova) use this **admin** call to determine
        if a request they signed received is from a valid user.

        If it is a valid signature, an openstack token that maps
        to the user/tenant is returned to the caller, along with
        all the other details returned from a normal token validation
        call.

        The returned token is useful for making calls to other
        OpenStack services within the context of the request.

        :param context: standard context
        :param credentials: dict of ec2 signature
        :param ec2Credentials: DEPRECATED dict of ec2 signature
        :returns: token: openstack token equivalent to access key along
                         with the corresponding service catalog and roles
        """
        # FIXME(ja): validate that a service token was used!

        # NOTE(termie): backwards compat hack
        if not credentials and ec2Credentials:
            credentials = ec2Credentials

        if 'access' not in credentials:
            raise exception.Unauthorized(message='EC2 signature not supplied.')

        creds_ref = self._get_credentials(context, credentials['access'])
        self.check_signature(creds_ref, credentials)

        # TODO(termie): don't create new tokens every time
        # TODO(termie): this is copied from TokenController.authenticate
        token_id = uuid.uuid4().hex
        tenant_ref = self.identity_api.get_tenant(
            context=context, tenant_id=creds_ref['tenant_id'])
        user_ref = self.identity_api.get_user(
            context=context, user_id=creds_ref['user_id'])
        metadata_ref = self.identity_api.get_metadata(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'])
        catalog_ref = self.catalog_api.get_catalog(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'],
            metadata=metadata_ref)

        token_ref = self.token_api.create_token(
            context, token_id, dict(id=token_id,
                                    user=user_ref,
                                    tenant=tenant_ref,
                                    metadata=metadata_ref))

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # TODO(termie): make this a util function or something
        # TODO(termie): i don't think the ec2 middleware currently expects a
        #               full return, but it contains a note saying that it
        #               would be better to expect a full return
        token_controller = service.TokenController()
        return token_controller._format_authenticate(
            token_ref, roles_ref, catalog_ref)
        :param context: standard context
        :param user_id: id of user
        :param tenant_id: id of tenant
        :returns: credential: dict of ec2 credential

        """
        # Admins may create credentials for anyone; everyone else only for
        # the user their token identifies.
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        self._assert_valid_tenant_id(context, tenant_id)
        # The access key doubles as the credential's primary key in the
        # ec2 backend (see create_credential below).
        cred_ref = {'user_id': user_id,
                    'tenant_id': tenant_id,
                    'access': uuid.uuid4().hex,
                    'secret': uuid.uuid4().hex}
        self.ec2_api.create_credential(context, cred_ref['access'], cred_ref)
        return {'credential': cred_ref}

    def get_credentials(self, context, user_id):
        """List all credentials for a user.

        :param context: standard context
        :param user_id: id of user
        :returns: credentials: list of ec2 credential dicts
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        return {'credentials': self.ec2_api.list_credentials(context,
                                                             user_id)}

    def get_credential(self, context, user_id, credential_id):
        """Retrieve a user's access/secret pair by the access key.

        Grab the full access/secret pair for a given access key.

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        creds = self._get_credentials(context, credential_id)
        return {'credential': creds}

    def delete_credential(self, context, user_id, credential_id):
        """Delete a user's access/secret pair.

        Used to revoke a user's access/secret pair

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: bool: success
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
            self._assert_owner(context, user_id, credential_id)

        self._assert_valid_user_id(context, user_id)
        # Raises Unauthorized if the access key does not exist, so a delete
        # of an unknown key fails loudly rather than returning success.
        self._get_credentials(context, credential_id)
        return self.ec2_api.delete_credential(context, credential_id)

    def _get_credentials(self, context, credential_id):
        """Return credentials from an ID.

        :param context: standard context
        :param credential_id: id of credential
        :raises exception.Unauthorized: when credential id is invalid
        :returns: credential: dict of ec2 credential.
        """
        creds = self.ec2_api.get_credential(context,
                                            credential_id)
        if not creds:
            raise exception.Unauthorized(message='EC2 access key not found.')
        return creds

    def _assert_identity(self, context, user_id):
        """Check that the provided token belongs to the user.

        :param context: standard context
        :param user_id: id of user
        :raises exception.Forbidden: when token is invalid
        """
        # An expired/unknown token is reported as Unauthorized (401), while a
        # valid token for a *different* user is Forbidden (403).
        try:
            token_ref = self.token_api.get_token(
                context=context,
                token_id=context['token_id'])
        except exception.TokenNotFound:
            raise exception.Unauthorized()

        token_user_id = token_ref['user'].get('id')
        if not token_user_id == user_id:
            raise exception.Forbidden()

    def _is_admin(self, context):
        """Wrap admin assertion error return statement.

        :param context: standard context
        :returns: bool: success
        """
        try:
            self.assert_admin(context)
            return True
        except exception.Forbidden:
            return False

    def _assert_owner(self, context, user_id, credential_id):
        """Ensure the provided user owns the credential.

        :param context: standard context
        :param user_id: expected credential owner
        :param credential_id: id of credential object
        :raises exception.Forbidden: on failure
        """
        cred_ref = self.ec2_api.get_credential(context, credential_id)
        if not user_id == cred_ref['user_id']:
            raise exception.Forbidden()

    def _assert_valid_user_id(self, context, user_id):
        """Ensure a valid user id.

        :param context: standard context
        :param user_id: expected credential owner
        :raises exception.UserNotFound: on failure
        """
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)

    def _assert_valid_tenant_id(self, context, tenant_id):
        """Ensure a valid tenant id.

        :param context: standard context
        :param tenant_id: id of tenant to validate
        :raises exception.TenantNotFound: on failure
        """
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=tenant_id)
        if not tenant_ref:
            raise exception.TenantNotFound(tenant_id=tenant_id)
./CrossVul/dataset_final_sorted/CWE-255/py/bad_3793_0
crossvul-python_data_bad_5754_0
404: Not Found
./CrossVul/dataset_final_sorted/CWE-255/py/bad_5754_0
crossvul-python_data_bad_3792_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the EC2 Credentials service.

This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.

A user can create as many access/secret pairs, each of which map to a
specific tenant.  This is required because OpenStack supports a user
belonging to multiple tenants, whereas the signatures created on ec2-style
requests don't allow specification of which tenant the user wishs to act
upon.

To complete the cycle, we provide a method that OpenStack services can
use to validate a signature and get a corresponding openstack token. This
token allows method calls to other services within the context the
access/secret was created.  As an example, nova requests keystone to validate
the signature of a request, receives a token, and then makes a request to
glance to list images needed to perform the requested task.

"""

import uuid

from keystone import catalog
from keystone import config
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import service
from keystone import token
from keystone.common import manager
from keystone.common import utils
from keystone.common import wsgi


CONF = config.CONF


class Manager(manager.Manager):
    """Default pivot point for the EC2 Credentials backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        super(Manager, self).__init__(CONF.ec2.driver)


class Ec2Extension(wsgi.ExtensionRouter):
    """Route the ec2tokens validation call and the OS-EC2 credential CRUD."""

    def add_routes(self, mapper):
        ec2_controller = Ec2Controller()
        # validation
        mapper.connect('/ec2tokens',
                       controller=ec2_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))

        # crud
        mapper.connect('/users/{user_id}/credentials/OS-EC2',
                       controller=ec2_controller,
                       action='create_credential',
                       conditions=dict(method=['POST']))
        mapper.connect('/users/{user_id}/credentials/OS-EC2',
                       controller=ec2_controller,
                       action='get_credentials',
                       conditions=dict(method=['GET']))
        mapper.connect('/users/{user_id}/credentials/OS-EC2/{credential_id}',
                       controller=ec2_controller,
                       action='get_credential',
                       conditions=dict(method=['GET']))
        mapper.connect('/users/{user_id}/credentials/OS-EC2/{credential_id}',
                       controller=ec2_controller,
                       action='delete_credential',
                       conditions=dict(method=['DELETE']))


class Ec2Controller(wsgi.Application):
    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        self.ec2_api = Manager()
        super(Ec2Controller, self).__init__()

    def check_signature(self, creds_ref, credentials):
        """Validate the signature of an ec2-style request.

        :param creds_ref: stored credential dict, supplies the 'secret'
        :param credentials: signed request dict ('signature', 'host', ...)
        :raises exception.Unauthorized: when the signature is missing or
            does not match
        """
        # Guard first so a request without a signature yields a clean 401
        # instead of a KeyError below.
        if 'signature' not in credentials:
            raise exception.Unauthorized(message='EC2 signature not supplied.')
        signer = utils.Ec2Signer(creds_ref['secret'])
        signature = signer.generate(credentials)
        # auth_str_equal is a constant-time comparison.
        if utils.auth_str_equal(credentials['signature'], signature):
            return
        # NOTE(vish): Some libraries don't use the port when signing
        #             requests, so try again without port.
        # BUG FIX: the retry condition must look at the *host* for a port,
        # not at the signature; and credentials is a dict, so the second
        # comparison must use item access, not attribute access.
        elif ':' in credentials['host']:
            hostname, _port = credentials['host'].split(':')
            credentials['host'] = hostname
            signature = signer.generate(credentials)
            if not utils.auth_str_equal(credentials['signature'], signature):
                raise exception.Unauthorized(message='Invalid EC2 signature.')
        else:
            raise exception.Unauthorized(message='EC2 signature not supplied.')

    def authenticate(self, context, credentials=None, ec2Credentials=None):
        """Validate a signed EC2 request and provide a token.

        Other services (such as Nova) use this **admin** call to determine
        if a request they signed received is from a valid user.

        If it is a valid signature, an openstack token that maps
        to the user/tenant is returned to the caller, along with
        all the other details returned from a normal token validation
        call.

        The returned token is useful for making calls to other
        OpenStack services within the context of the request.

        :param context: standard context
        :param credentials: dict of ec2 signature
        :param ec2Credentials: DEPRECATED dict of ec2 signature
        :returns: token: openstack token equivalent to access key along
                         with the corresponding service catalog and roles
        """
        # FIXME(ja): validate that a service token was used!

        # NOTE(termie): backwards compat hack
        if not credentials and ec2Credentials:
            credentials = ec2Credentials

        if 'access' not in credentials:
            raise exception.Unauthorized(message='EC2 signature not supplied.')

        creds_ref = self._get_credentials(context,
                                          credentials['access'])
        self.check_signature(creds_ref, credentials)

        # TODO(termie): don't create new tokens every time
        # TODO(termie): this is copied from TokenController.authenticate
        token_id = uuid.uuid4().hex
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=creds_ref['tenant_id'])
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=creds_ref['user_id'])
        metadata_ref = self.identity_api.get_metadata(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'])
        catalog_ref = self.catalog_api.get_catalog(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'],
            metadata=metadata_ref)

        token_ref = self.token_api.create_token(
            context, token_id, dict(id=token_id,
                                    user=user_ref,
                                    tenant=tenant_ref,
                                    metadata=metadata_ref))

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # TODO(termie): make this a util function or something
        # TODO(termie): i don't think the ec2 middleware currently expects a
        #               full return, but it contains a note saying that it
        #               would be better to expect a full return
        token_controller = service.TokenController()
        return token_controller._format_authenticate(
            token_ref, roles_ref, catalog_ref)

    def create_credential(self, context, user_id, tenant_id):
        """Create a secret/access pair for use with ec2 style auth.

        Generates a new set of credentials that map the the user/tenant
        pair.

        :param context: standard context
        :param user_id: id of user
        :param tenant_id: id of tenant
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        self._assert_valid_tenant_id(context, tenant_id)
        # The access key doubles as the credential's primary key.
        cred_ref = {'user_id': user_id,
                    'tenant_id': tenant_id,
                    'access': uuid.uuid4().hex,
                    'secret': uuid.uuid4().hex}
        self.ec2_api.create_credential(context, cred_ref['access'], cred_ref)
        return {'credential': cred_ref}

    def get_credentials(self, context, user_id):
        """List all credentials for a user.

        :param context: standard context
        :param user_id: id of user
        :returns: credentials: list of ec2 credential dicts
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        return {'credentials': self.ec2_api.list_credentials(context,
                                                             user_id)}

    def get_credential(self, context, user_id, credential_id):
        """Retrieve a user's access/secret pair by the access key.

        Grab the full access/secret pair for a given access key.

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        creds = self._get_credentials(context, credential_id)
        return {'credential': creds}

    def delete_credential(self, context, user_id, credential_id):
        """Delete a user's access/secret pair.

        Used to revoke a user's access/secret pair

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: bool: success
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
            self._assert_owner(context, user_id, credential_id)

        self._assert_valid_user_id(context, user_id)
        # Raises Unauthorized if the access key is unknown.
        self._get_credentials(context, credential_id)
        return self.ec2_api.delete_credential(context, credential_id)

    def _get_credentials(self, context, credential_id):
        """Return credentials from an ID.

        :param context: standard context
        :param credential_id: id of credential
        :raises exception.Unauthorized: when credential id is invalid
        :returns: credential: dict of ec2 credential.
        """
        creds = self.ec2_api.get_credential(context,
                                            credential_id)
        if not creds:
            raise exception.Unauthorized(message='EC2 access key not found.')
        return creds

    def _assert_identity(self, context, user_id):
        """Check that the provided token belongs to the user.

        :param context: standard context
        :param user_id: id of user
        :raises exception.Forbidden: when token is invalid
        """
        # Unknown/expired token -> 401; valid token for another user -> 403.
        try:
            token_ref = self.token_api.get_token(
                context=context,
                token_id=context['token_id'])
        except exception.TokenNotFound:
            raise exception.Unauthorized()

        token_user_id = token_ref['user'].get('id')
        if not token_user_id == user_id:
            raise exception.Forbidden()

    def _is_admin(self, context):
        """Wrap admin assertion error return statement.

        :param context: standard context
        :returns: bool: success
        """
        try:
            self.assert_admin(context)
            return True
        except exception.Forbidden:
            return False

    def _assert_owner(self, context, user_id, credential_id):
        """Ensure the provided user owns the credential.

        :param context: standard context
        :param user_id: expected credential owner
        :param credential_id: id of credential object
        :raises exception.Forbidden: on failure
        """
        cred_ref = self.ec2_api.get_credential(context, credential_id)
        if not user_id == cred_ref['user_id']:
            raise exception.Forbidden()

    def _assert_valid_user_id(self, context, user_id):
        """Ensure a valid user id.

        :param context: standard context
        :param user_id: expected credential owner
        :raises exception.UserNotFound: on failure
        """
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)

    def _assert_valid_tenant_id(self, context, tenant_id):
        """Ensure a valid tenant id.

        :param context: standard context
        :param tenant_id: id of tenant to validate
        :raises exception.TenantNotFound: on failure
        """
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=tenant_id)
        if not tenant_ref:
            raise exception.TenantNotFound(tenant_id=tenant_id)
./CrossVul/dataset_final_sorted/CWE-255/py/bad_3792_0
crossvul-python_data_good_742_1
"""Parse (absolute and relative) URLs. urlparse module is based upon the following RFC specifications. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding and L. Masinter, January 2005. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter and L.Masinter, December 1999. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. Berners-Lee, R. Fielding, and L. Masinter, August 1998. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 1995. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. McCahill, December 1994 RFC 3986 is considered the current standard and any future changes to urlparse module should conform with it. The urlparse module is currently not entirely compliant with this RFC due to defacto scenarios for parsing, and for backward compatibility purposes, some parsing quirks from older RFCs are retained. The testcases in test_urlparse.py provides a good indicator of parsing behavior. """ import re import sys import collections import warnings __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", "urlsplit", "urlunsplit", "urlencode", "parse_qs", "parse_qsl", "quote", "quote_plus", "quote_from_bytes", "unquote", "unquote_plus", "unquote_to_bytes", "DefragResult", "ParseResult", "SplitResult", "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] # A classification of schemes. # The empty string classifies URLs with no scheme specified, # being the default value returned by “urlsplit” and “urlparse”. 
# Scheme tables: membership in each list controls which parsing behaviors
# (relative resolution, netloc splitting, ;params, ?query, #fragment)
# urlparse/urljoin apply to a URL of that scheme.
uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', 'sftp',
                 'svn', 'svn+ssh', 'ws', 'wss']

uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh',
               'ws', 'wss']

uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', 'sftp', 'tel']

# These are not actually used anymore, but should stay for backwards
# compatibility.  (They are undocumented, but have a public-looking name.)

non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']

uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips']

uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
# Cache of urlsplit() results, keyed on (url, scheme, allow_fragments,
# type(url), type(scheme)); bounded by MAX_CACHE_SIZE.
_parse_cache = {}

def clear_cache():
    """Clear the parse cache and the quoters cache."""
    _parse_cache.clear()
    _safe_quoters.clear()


# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'

def _noop(obj):
    # Identity coercion used when the inputs were already str.
    return obj

def _encode_result(obj, encoding=_implicit_encoding,
                        errors=_implicit_errors):
    # Coerce a str result back to bytes for bytes-typed inputs.
    return obj.encode(encoding, errors)

def _decode_args(args, encoding=_implicit_encoding,
                       errors=_implicit_errors):
    # Decode each bytes argument to str; empty values become ''.
    return tuple(x.decode(encoding, errors) if x else '' for x in args)

def _coerce_args(*args):
    # Invokes decode if necessary to create str args
    # and returns the coerced inputs along with
    # an appropriate result coercion function
    #  - noop for str inputs
    #  - encoding function otherwise
    str_input = isinstance(args[0], str)
    for arg in args[1:]:
        # We special-case the empty string to support the
        # "scheme=''" default argument to some functions
        if arg and isinstance(arg, str) != str_input:
            raise TypeError("Cannot mix str and non-str arguments")
    if str_input:
        return args + (_noop,)
    return _decode_args(args) + (_encode_result,)

# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
    """Standard approach to encoding parsed results from str to bytes"""
    __slots__ = ()

    def encode(self, encoding='ascii', errors='strict'):
        return self._encoded_counterpart(*(x.encode(encoding, errors)
                                           for x in self))


class _ResultMixinBytes(object):
    """Standard approach to decoding parsed results from bytes to str"""
    __slots__ = ()

    def decode(self, encoding='ascii', errors='strict'):
        return self._decoded_counterpart(*(x.decode(encoding, errors)
                                           for x in self))


class _NetlocResultMixinBase(object):
    """Shared methods for the parsed result objects containing a netloc element"""
    __slots__ = ()

    @property
    def username(self):
        return self._userinfo[0]

    @property
    def password(self):
        return self._userinfo[1]

    @property
    def hostname(self):
        hostname = self._hostinfo[0]
        if not hostname:
            return None
        # Scoped IPv6 address may have zone info, which must not be lowercased
        # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys
        separator = '%' if isinstance(hostname, str) else b'%'
        hostname, percent, zone = hostname.partition(separator)
        return hostname.lower() + percent + zone

    @property
    def port(self):
        port = self._hostinfo[1]
        if port is not None:
            try:
                port = int(port, 10)
            except ValueError:
                message = f'Port could not be cast to integer value as {port!r}'
                raise ValueError(message) from None
            if not ( 0 <= port <= 65535):
                raise ValueError("Port out of range 0-65535")
        return port


class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    __slots__ = ()

    @property
    def _userinfo(self):
        # (username, password) split out of the part before '@', or Nones.
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition('@')
        if have_info:
            username, have_password, password = userinfo.partition(':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        # (hostname, port) split out of the part after '@'; brackets mark
        # an IPv6 literal whose colons must not be mistaken for the port.
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition('@')
        _, have_open_br, bracketed = hostinfo.partition('[')
        if have_open_br:
            hostname, _, port = bracketed.partition(']')
            _, _, port = port.partition(':')
        else:
            hostname, _, port = hostinfo.partition(':')
        if not port:
            port = None
        return hostname, port


class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    __slots__ = ()

    @property
    def _userinfo(self):
        # bytes twin of _NetlocResultMixinStr._userinfo
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition(b'@')
        if have_info:
            username, have_password, password = userinfo.partition(b':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        # bytes twin of _NetlocResultMixinStr._hostinfo
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition(b'@')
        _, have_open_br, bracketed = hostinfo.partition(b'[')
        if have_open_br:
            hostname, _, port = bracketed.partition(b']')
            _, _, port = port.partition(b':')
        else:
            hostname, _, port = hostinfo.partition(b':')
        if not port:
            port = None
        return hostname, port


from collections import namedtuple

_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple(
    'SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple(
    'ParseResult', 'scheme netloc path params query fragment')

_DefragResultBase.__doc__ = """
DefragResult(url, fragment)

A 2-tuple that contains the url without fragment identifier and the fragment
identifier as a separate argument.
"""

_DefragResultBase.url.__doc__ = """The URL with no fragment identifier."""

_DefragResultBase.fragment.__doc__ = """
Fragment identifier separated from URL, that allows indirect identification of
a secondary resource by reference to a primary resource and additional
identifying information.
"""

_SplitResultBase.__doc__ = """
SplitResult(scheme, netloc, path, query, fragment)

A 5-tuple that contains the different components of a URL. Similar to
ParseResult, but does not split params.
"""

_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request."""

_SplitResultBase.netloc.__doc__ = """
Network location where the request is made to.
"""

_SplitResultBase.path.__doc__ = """
The hierarchical path, such as the path to a file to download.
"""

_SplitResultBase.query.__doc__ = """
The query component, that contains non-hierarchical data, that along with data
in path component, identifies a resource in the scope of URI's scheme and
network location.
"""

_SplitResultBase.fragment.__doc__ = """
Fragment identifier, that allows indirect identification of a secondary
resource by reference to a primary resource and additional identifying
information.
"""

_ParseResultBase.__doc__ = """
ParseResult(scheme, netloc, path, params, query, fragment)

A 6-tuple that contains components of a parsed URL.
"""

_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__
_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__
_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__
_ParseResultBase.params.__doc__ = """
Parameters for last path element used to dereference the URI in order to
provide access to perform some operation on the resource.
"""

_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__
_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__


# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr

# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    __slots__ = ()
    def geturl(self):
        if self.fragment:
            return self.url + '#' + self.fragment
        else:
            return self.url

class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    __slots__ = ()
    def geturl(self):
        return urlunsplit(self)

class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    __slots__ = ()
    def geturl(self):
        return urlunparse(self)

# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    __slots__ = ()
    def geturl(self):
        if self.fragment:
            return self.url + b'#' + self.fragment
        else:
            return self.url

class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    __slots__ = ()
    def geturl(self):
        return urlunsplit(self)

class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    __slots__ = ()
    def geturl(self):
        return urlunparse(self)

# Set up the encode/decode result pairs
def _fix_result_transcoding():
    # Link each str result class to its bytes twin so .encode()/.decode()
    # can build the counterpart type.
    _result_pairs = (
        (DefragResult, DefragResultBytes),
        (SplitResult, SplitResultBytes),
        (ParseResult, ParseResultBytes),
    )
    for _decoded, _encoded in _result_pairs:
        _decoded._encoded_counterpart = _encoded
        _encoded._decoded_counterpart = _decoded

_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    splitresult = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = splitresult
    # Only schemes known to use ;params get the params split off the path.
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    result = ParseResult(scheme, netloc, url, params, query, fragment)
    return _coerce_result(result)

def _splitparams(url):
    # Split ;params off the *last* path segment only.
    if '/'  in url:
        i = url.find(';', url.rfind('/'))
        if i < 0:
            return url, ''
    else:
        i = url.find(';')
    return url[:i], url[i+1:]

def _splitnetloc(url, start=0):
    delim = len(url)   # position of end of domain part of url, default is end
    for c in '/?#':    # look for delimiters; the order is NOT important
        wdelim = url.find(c, start)        # find first of this delim
        if wdelim >= 0:            # if found
            delim = min(delim, wdelim)     # use earliest delim position
    return url[start:delim], url[delim:]   # return (domain, rest)

def _checknetloc(netloc):
    # Security check (bpo-36742): reject hosts containing characters that
    # NFKC-normalize into URL delimiters, which could smuggle a different
    # authority past the parser.
    if not netloc or netloc.isascii():
        return
    # looking for characters like \u2100 that expand to 'a/c'
    # IDNA uses NFKC equivalence, so normalize for this check
    import unicodedata
    n = netloc.replace('@', '')   # ignore characters already included
    n = n.replace(':', '')        # but not the surrounding text
    n = n.replace('#', '')
    n = n.replace('?', '')
    netloc2 = unicodedata.normalize('NFKC', n)
    if n == netloc2:
        return
    for c in '/?#@:':
        if c in netloc2:
            raise ValueError("netloc '" + netloc + "' contains invalid " +
                             "characters under NFKC normalization")

def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    # Results are memoized; the key includes the argument types so str and
    # bytes inputs never share a cache entry.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return _coerce_result(cached)
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            _checknetloc(netloc)
            v = SplitResult('http', netloc, url, query, fragment)
            _parse_cache[key] = v
            return _coerce_result(v)
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    _checknetloc(netloc)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return _coerce_result(v)

def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
                                                  _coerce_args(*components))
    if params:
        url = "%s;%s" % (url, params)
    return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))

def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
                                          _coerce_args(*components))
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return _coerce_result(url)

def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base

    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)

    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc

    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))

    base_parts = bpath.split('/')
    if base_parts[-1] != '':
        # the last item is not a directory, so will not be taken into account
        # in resolving the relative path
        del base_parts[-1]

    # for rfc3986, ignore all base path should the first character be root.
    if path[:1] == '/':
        segments = path.split('/')
    else:
        segments = base_parts + path.split('/')
        # filter out elements that would cause redundant slashes on re-joining
        # the resolved_path
        segments[1:-1] = filter(None, segments[1:-1])

    resolved_path = []

    for seg in segments:
        if seg == '..':
            try:
                resolved_path.pop()
            except IndexError:
                # ignore any .. segments that would otherwise cause an
                # IndexError when popped from resolved_path if resolving for
                # rfc3986
                pass
        elif seg == '.':
            continue
        else:
            resolved_path.append(seg)

    if segments[-1] in ('.', '..'):
        # do some post-processing here. if the last segment was a relative dir,
        # then we need to append the trailing '/'
        resolved_path.append('')

    return _coerce_result(urlunparse((scheme, netloc, '/'.join(
        resolved_path) or '/', params, query, fragment)))


def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
    else:
        frag = ''
        defrag = url
    return _coerce_result(DefragResult(defrag, frag))

_hexdig = '0123456789ABCDEFabcdef'
# Lazily-built map from two-hex-digit byte pairs to the decoded byte; see
# unquote_to_bytes below.
_hextobyte = None

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Is it a string-like object?
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    bits = string.split(b'%')
    if len(bits) == 1:
        return string
    res = [bits[0]]
    append = res.append
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    global _hextobyte
    if _hextobyte is None:
        _hextobyte = {(a + b).encode(): bytes.fromhex(a + b)
                      for a in _hexdig for b in _hexdig}
    for item in bits[1:]:
        try:
            append(_hextobyte[item[:2]])
            append(item[2:])
        except KeyError:
            # Malformed escape (e.g. '%zz'): keep the '%' literally.
            append(b'%')
            append(item)
    return b''.join(res)

_asciire = re.compile('([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Is it a string-like object?
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # Decode only ASCII runs: non-ASCII text between escapes is passed
    # through untouched.
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)


def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace', max_num_fields=None):
    """Parse a query given as a string argument.

        Arguments:

        qs: percent-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            percent-encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.

        encoding and errors: specify how to decode percent-encoded sequences
            into Unicode characters, as accepted by the bytes.decode() method.

        max_num_fields: int. If set, then throws a ValueError if there
            are more than n fields read by parse_qsl().

        Returns a dictionary.
    """
    parsed_result = {}
    pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                      encoding=encoding, errors=errors,
                      max_num_fields=max_num_fields)
    for name, value in pairs:
        if name in parsed_result:
            parsed_result[name].append(value)
        else:
            parsed_result[name] = [value]
    return parsed_result


def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace', max_num_fields=None):
    """Parse a query given as a string argument.

        Arguments:

        qs: percent-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            percent-encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as blank
            strings.  The default false value indicates that blank values
            are to be ignored and treated as if they were  not included.

        strict_parsing: flag indicating what to do with parsing errors. If
            false (the default), errors are silently ignored. If true,
            errors raise a ValueError exception.

        encoding and errors: specify how to decode percent-encoded sequences
            into Unicode characters, as accepted by the bytes.decode() method.

        max_num_fields: int. If set, then throws a ValueError
            if there are more than n fields read by parse_qsl().

        Returns a list, as G-d intended.
    """
    qs, _coerce_result = _coerce_args(qs)

    # If max_num_fields is defined then check that the number of fields
    # is less than max_num_fields. This prevents a memory exhaustion DOS
    # attack via post bodies with many fields.
    if max_num_fields is not None:
        num_fields = 1 + qs.count('&') + qs.count(';')
        if max_num_fields < num_fields:
            raise ValueError('Max number of fields exceeded')

    # Both '&' and ';' are accepted as pair separators here.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            name = _coerce_result(name)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            value = _coerce_result(value)
            r.append((name, value))
    return r

def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    string = string.replace('+', ' ')
    return unquote(string, encoding, errors)

_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-~')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
# Cache of Quoter instances keyed on the 'safe' argument; cleared by
# clear_cache().
_safe_quoters = {}

class Quoter(collections.defaultdict):
    """A mapping from bytes (in range(0,256)) to strings.

    String values are percent-encoded byte values, unless the key < 128, and
    in the "safe" set (either the specified safe set, or default set).
    """
    # Keeps a cache internally, using defaultdict, for efficiency (lookups
    # of cached keys don't call Python code at all).
    def __init__(self, safe):
        """safe: bytes object."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        # Without this, will just display as a defaultdict
        return "<%s %r>" % (self.__class__.__name__, dict(self))

    def __missing__(self, b):
        # Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{:02X}'.format(b) self[b] = res return res def quote(string, safe='/', encoding=None, errors=None): """quote('abc def') -> 'abc%20def' Each part of a URL, e.g. the path info, the query, etc., has a different set of reserved characters that must be quoted. The quote function offers a cautious (not minimal) way to quote a string for most of these parts. RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists the following (un)reserved characters. unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" reserved = gen-delims / sub-delims gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" Each of the reserved characters is reserved in some component of a URL, but not necessarily in all of them. The quote function %-escapes all characters that are neither in the unreserved chars ("always safe") nor the additional chars set via the safe arg. The default for the safe arg is '/'. The character is reserved, but in typical usage the quote function is being called on a path where the existing slash characters are to be preserved. Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. Now, "~" is included in the set of unreserved characters. string and safe may be either str or bytes objects. encoding and errors must not be specified if string is a bytes object. The optional encoding and errors parameters specify how to deal with non-ASCII characters, as accepted by the str.encode method. By default, encoding='utf-8' (characters are encoded with UTF-8), and errors='strict' (unsupported characters raise a UnicodeEncodeError). 
""" if isinstance(string, str): if not string: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'strict' string = string.encode(encoding, errors) else: if encoding is not None: raise TypeError("quote() doesn't support 'encoding' for bytes") if errors is not None: raise TypeError("quote() doesn't support 'errors' for bytes") return quote_from_bytes(string, safe) def quote_plus(string, safe='', encoding=None, errors=None): """Like quote(), but also replace ' ' with '+', as required for quoting HTML form values. Plus signs in the original string are escaped unless they are included in safe. It also does not have safe default to '/'. """ # Check if ' ' in string, where string may either be a str or bytes. If # there are no spaces, the regular quote will produce the right answer. if ((isinstance(string, str) and ' ' not in string) or (isinstance(string, bytes) and b' ' not in string)): return quote(string, safe, encoding, errors) if isinstance(safe, str): space = ' ' else: space = b' ' string = quote(string, safe + space, encoding, errors) return string.replace(' ', '+') def quote_from_bytes(bs, safe='/'): """Like quote(), but accepts a bytes object rather than a str, and does not perform string-to-bytes encoding. It always returns an ASCII string. 
quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' """ if not isinstance(bs, (bytes, bytearray)): raise TypeError("quote_from_bytes() expected bytes") if not bs: return '' if isinstance(safe, str): # Normalize 'safe' by converting to bytes and removing non-ASCII chars safe = safe.encode('ascii', 'ignore') else: safe = bytes([c for c in safe if c < 128]) if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): return bs.decode() try: quoter = _safe_quoters[safe] except KeyError: _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ return ''.join([quoter(char) for char in bs]) def urlencode(query, doseq=False, safe='', encoding=None, errors=None, quote_via=quote_plus): """Encode a dict or sequence of two-element tuples into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. The components of a query arg may each be either a string or a bytes type. The safe, encoding, and errors parameters are passed down to the function specified by quote_via (encoding and errors only if a component is a str). """ if hasattr(query, "items"): query = query.items() else: # It's a bother at times that strings and string-like objects are # sequences. try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and not isinstance(query[0], tuple): raise TypeError # Zero-length sequences of all types will get here and succeed, # but that's a minor nit. 
Since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: ty, va, tb = sys.exc_info() raise TypeError("not a valid non-string sequence " "or mapping object").with_traceback(tb) l = [] if not doseq: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) else: v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) l.append(k + '=' + v) elif isinstance(v, str): v = quote_via(v, safe, encoding, errors) l.append(k + '=' + v) else: try: # Is this a sufficient test for sequence-ness? x = len(v) except TypeError: # not a sequence v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: # loop over the sequence for elt in v: if isinstance(elt, bytes): elt = quote_via(elt, safe) else: elt = quote_via(str(elt), safe, encoding, errors) l.append(k + '=' + elt) return '&'.join(l) def to_bytes(url): warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8", DeprecationWarning, stacklevel=2) return _to_bytes(url) def _to_bytes(url): """to_bytes(u"URL") --> 'URL'.""" # Most URL schemes require ASCII. If that changes, the conversion # can be relaxed. # XXX get rid of to_bytes() if isinstance(url, str): try: url = url.encode("ASCII").decode() except UnicodeError: raise UnicodeError("URL " + repr(url) + " contains non-ASCII characters") return url def unwrap(url): """Transform a string like '<URL:scheme://host/path>' into 'scheme://host/path'. The string is returned unchanged if it's not a wrapped URL. 
""" url = str(url).strip() if url[:1] == '<' and url[-1:] == '>': url = url[1:-1].strip() if url[:4] == 'URL:': url = url[4:].strip() return url def splittype(url): warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splittype(url) _typeprog = None def _splittype(url): """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" global _typeprog if _typeprog is None: _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) match = _typeprog.match(url) if match: scheme, data = match.groups() return scheme.lower(), data return None, url def splithost(url): warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splithost(url) _hostprog = None def _splithost(url): """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" global _hostprog if _hostprog is None: _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL) match = _hostprog.match(url) if match: host_port, path = match.groups() if path and path[0] != '/': path = '/' + path return host_port, path return None, url def splituser(host): warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splituser(host) def _splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" user, delim, host = host.rpartition('@') return (user if delim else None), host def splitpasswd(user): warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splitpasswd(user) def _splitpasswd(user): """splitpasswd('user:passwd') -> 'user', 'passwd'.""" user, delim, passwd = user.partition(':') return user, (passwd if delim else None) def splitport(host): warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, " "use 
urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splitport(host) # splittag('/path#tag') --> '/path', 'tag' _portprog = None def _splitport(host): """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: _portprog = re.compile('(.*):([0-9]*)$', re.DOTALL) match = _portprog.match(host) if match: host, port = match.groups() if port: return host, port return host, None def splitnport(host, defport=-1): warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splitnport(host, defport) def _splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number are found after ':'. Return None if ':' but not a valid number.""" host, delim, port = host.rpartition(':') if not delim: host = port elif port: try: nport = int(port) except ValueError: nport = None return host, nport return host, defport def splitquery(url): warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splitquery(url) def _splitquery(url): """splitquery('/path?query') --> '/path', 'query'.""" path, delim, query = url.rpartition('?') if delim: return path, query return url, None def splittag(url): warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splittag(url) def _splittag(url): """splittag('/path#tag') --> '/path', 'tag'.""" path, delim, tag = url.rpartition('#') if delim: return path, tag return url, None def splitattr(url): warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, " "use urllib.parse.urlparse() instead", DeprecationWarning, stacklevel=2) return _splitattr(url) def _splitattr(url): """splitattr('/path;attr1=value1;attr2=value2;...') -> 
'/path', ['attr1=value1', 'attr2=value2', ...].""" words = url.split(';') return words[0], words[1:] def splitvalue(attr): warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, " "use urllib.parse.parse_qsl() instead", DeprecationWarning, stacklevel=2) return _splitvalue(attr) def _splitvalue(attr): """splitvalue('attr=value') --> 'attr', 'value'.""" attr, delim, value = attr.partition('=') return attr, (value if delim else None)
./CrossVul/dataset_final_sorted/CWE-255/py/good_742_1
crossvul-python_data_bad_743_1
"""Parse (absolute and relative) URLs.

urlparse module is based upon the following RFC specifications.

RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L.  Masinter, January 2005.

RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.

RFC 2396:  "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.

RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski,
July 1998.

RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.

RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994

RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it.  The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.

NOTE(review): this is the Python 2 implementation (py2 raise syntax,
``unicode`` builtin); it must not be run under Python 3.
"""

import re

__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]

# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp',
                 'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp', 'tel']

# These are not actually used anymore, but should stay for backwards
# compatibility.  (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

# urlsplit() memoizes its results; the cache is bounded by wholesale
# clearing once it reaches this size.
MAX_CACHE_SIZE = 20
_parse_cache = {}

def clear_cache():
    """Clear the parse cache."""
    _parse_cache.clear()


class ResultMixin(object):
    """Shared methods for the parsed result objects."""

    @property
    def username(self):
        netloc = self.netloc
        if "@" in netloc:
            # userinfo is everything before the LAST '@' (rsplit)
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None

    @property
    def password(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        netloc = self.netloc.split('@')[-1]
        if '[' in netloc and ']' in netloc:
            # bracketed IPv6 literal
            return netloc.split(']')[0][1:].lower()
        elif ':' in netloc:
            return netloc.split(':')[0].lower()
        elif netloc == '':
            return None
        else:
            return netloc.lower()

    @property
    def port(self):
        netloc = self.netloc.split('@')[-1].split(']')[-1]
        if ':' in netloc:
            port = netloc.split(':')[1]
            if port:
                port = int(port, 10)
                # verify legal port
                if (0 <= port <= 65535):
                    return port
        return None

from collections import namedtuple

class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunsplit(self)


class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunparse(self)


def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    tuple = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = tuple
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)

def _splitparams(url):
    # params only apply to the last path segment, so only look for ';'
    # after the final '/'
    if '/' in url:
        i = url.find(';', url.rfind('/'))
        if i < 0:
            return url, ''
    else:
        i = url.find(';')
    return url[:i], url[i+1:]

def _splitnetloc(url, start=0):
    delim = len(url)   # position of end of domain part of url, default is end
    for c in '/?#':    # look for delimiters; the order is NOT important
        wdelim = url.find(c, start)        # find first of this delim
        if wdelim >= 0:                    # if found
            delim = min(delim, wdelim)     # use earliest delim position
    return url[start:delim], url[delim:]   # return (domain, rest)

def _checknetloc(netloc):
    # Reject netlocs whose NFKC normalization introduces URL delimiters
    # (e.g. \u2100 expands to 'a/c'); guards against parsing confusion.
    if not netloc or not isinstance(netloc, unicode):
        return
    # looking for characters like \u2100 that expand to 'a/c'
    # IDNA uses NFKC equivalence, so normalize for this check
    import unicodedata
    n = netloc.rpartition('@')[2]  # ignore anything to the left of '@'
    n = n.replace(':', '')         # ignore characters already included
    n = n.replace('#', '')         # but not the surrounding text
    n = n.replace('?', '')
    netloc2 = unicodedata.normalize('NFKC', n)
    if n == netloc2:
        return
    for c in '/?#@:':
        if c in netloc2:
            raise ValueError("netloc '" + netloc + "' contains invalid " +
                             "characters under NFKC normalization")

def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE:  # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http':  # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            _checknetloc(netloc)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    _checknetloc(netloc)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v

def urlunparse(data):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment = data
    if params:
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))

def urlunsplit(data):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment = data
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/':
            url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url

def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                    and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))

def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''

# Helper that is a no-op on Python 3-style builds where ``unicode`` is
# undefined, and a real isinstance check on Python 2.
try:
    unicode
except NameError:
    def _is_unicode(x):
        return 0
else:
    def _is_unicode(x):
        return isinstance(x, unicode)

# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin).  If you update this function,
# update it also in urllib.  This code duplication does not existin in Python3.

_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b, 16)))
                 for a in _hexdig for b in _hexdig)
_asciire = re.compile('([\x00-\x7f]+)')

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    if _is_unicode(s):
        if '%' not in s:
            return s
        # decode only the ASCII runs; non-ASCII runs pass through unchanged
        bits = _asciire.split(s)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(unquote(str(bits[i])).decode('latin1'))
            append(bits[i + 1])
        return ''.join(res)

    bits = s.split('%')
    # fastpath
    if len(bits) == 1:
        return s
    res = [bits[0]]
    append = res.append
    for item in bits[1:]:
        try:
            append(_hextochr[item[:2]])
            append(item[2:])
        except KeyError:
            # invalid escape; keep the '%' literally
            append('%')
            append(item)
    return ''.join(res)

def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
    """Parse a query given as a string argument.

        Arguments:

        qs: percent-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            percent-encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.

        max_num_fields: int. If set, then throws a ValueError if there
            are more than n fields read by parse_qsl().
    """
    dict = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
                                 max_num_fields):
        if name in dict:
            dict[name].append(value)
        else:
            dict[name] = [value]
    return dict

def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    max_num_fields: int. If set, then throws a ValueError if there
        are more than n fields read by parse_qsl().

    Returns a list, as G-d intended.
    """
    # If max_num_fields is defined then check that the number of fields
    # is less than max_num_fields. This prevents a memory exhaustion DOS
    # attack via post bodies with many fields.
    if max_num_fields is not None:
        num_fields = 1 + qs.count('&') + qs.count(';')
        if max_num_fields < num_fields:
            raise ValueError('Max number of fields exceeded')

    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %r" % (name_value,)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
./CrossVul/dataset_final_sorted/CWE-255/py/bad_743_1
crossvul-python_data_good_3789_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(review): some imports (uuid, json, cms, timeutils) appear unused in
# this portion of the file; presumably used further down -- confirm before
# removing.
import uuid

import routes
import json

from keystone import config
from keystone import catalog
from keystone.common import cms
from keystone.common import logging
from keystone.common import wsgi
from keystone import exception
from keystone import identity
from keystone.openstack.common import timeutils
from keystone import policy
from keystone import token


LOG = logging.getLogger(__name__)


class AdminRouter(wsgi.ComposingRouter):
    """WSGI router for the admin API: token CRUD/validation, certificates,
    extensions, and the identity admin routes."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))
        mapper.connect('/tokens/revoked',
                       controller=auth_controller,
                       action='revocation_list',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token_head',
                       conditions=dict(method=['HEAD']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='delete_token',
                       conditions=dict(method=['DELETE']))
        mapper.connect('/tokens/{token_id}/endpoints',
                       controller=auth_controller,
                       action='endpoints',
                       conditions=dict(method=['GET']))

        # Certificates used to verify auth tokens
        mapper.connect('/certificates/ca',
                       controller=auth_controller,
                       action='ca_cert',
                       conditions=dict(method=['GET']))
        mapper.connect('/certificates/signing',
                       controller=auth_controller,
                       action='signing_cert',
                       conditions=dict(method=['GET']))

        # Miscellaneous Operations
        extensions_controller = AdminExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.AdminRouter()
        routers = [identity_router]
        super(AdminRouter, self).__init__(mapper, routers)


class PublicRouter(wsgi.ComposingRouter):
    """WSGI router for the public API: authentication, certificates,
    extensions, and the identity public routes."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))
        mapper.connect('/certificates/ca',
                       controller=auth_controller,
                       action='ca_cert',
                       conditions=dict(method=['GET']))
        mapper.connect('/certificates/signing',
                       controller=auth_controller,
                       action='signing_cert',
                       conditions=dict(method=['GET']))

        # Miscellaneous
        extensions_controller = PublicExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.PublicRouter()
        routers = [identity_router]
        super(PublicRouter, self).__init__(mapper, routers)


class PublicVersionRouter(wsgi.ComposingRouter):
    """Router that serves only the version listing on the public port."""

    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(PublicVersionRouter, self).__init__(mapper, routers)


class AdminVersionRouter(wsgi.ComposingRouter):
    """Router that serves only the version listing on the admin port."""

    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(AdminVersionRouter, self).__init__(mapper, routers)


class VersionController(wsgi.Application):
    """Serves API version metadata for either the public or admin endpoint.

    version_type selects which catalog URL to report ('public' -> publicURL,
    'admin' -> adminURL).
    """

    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        # catalog endpoint key, e.g. 'publicURL' or 'adminURL'
        self.url_key = '%sURL' % version_type
        super(VersionController, self).__init__()

    def _get_identity_url(self, context):
        # Scan the service catalog for the 'identity' service and return
        # its URL for this controller's endpoint type.
        catalog_ref = self.catalog_api.get_catalog(context=context,
                                                   user_id=None,
                                                   tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]

        # no identity service registered in the catalog
        raise exception.NotImplemented()

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'

        versions = {}
        versions['v2.0'] = {
            'id': 'v2.0',
            'status': 'beta',
            'updated': '2011-11-19T00:00:00Z',
            'links': [
                {
                    'rel': 'self',
                    'href': identity_url,
                }, {
                    'rel': 'describedby',
                    'type': 'text/html',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/content/'
                }, {
                    'rel': 'describedby',
                    'type': 'application/pdf',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/identity-dev-guide-'
                            '2.0.pdf'
                }
            ],
            'media-types': [
                {
                    'base': 'application/json',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+json'
                }, {
                    'base': 'application/xml',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+xml'
                }
            ]
        }

        return versions

    def get_versions(self, context):
        # 300 Multiple Choices: the client must pick a version
        versions = self._get_versions_list(context)
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            'versions': {
                'values': versions.values()
            }
        })

    def get_version(self, context):
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            'version': versions['v2.0']
        })


class NoopController(wsgi.Application):
    """Controller whose single action returns an empty body."""

    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        return {}


class TokenController(wsgi.Application):
    """Handles token issuance/validation and serves signing certificates."""

    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()

    def ca_cert(self, context, auth=None):
        # Return the raw CA certificate file contents (PEM, presumably --
        # format is whatever CONF.signing.ca_certs points at).
        ca_file = open(config.CONF.signing.ca_certs, 'r')
        data = ca_file.read()
        ca_file.close()
        return data

    def signing_cert(self, context, auth=None):
        # Return the raw token-signing certificate file contents.
        cert_file = open(config.CONF.signing.certfile, 'r')
        data = cert_file.read()
        cert_file.close()
        return data

    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
""" if 'passwordCredentials' in auth: user_id = auth['passwordCredentials'].get('userId', None) username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) if username: try: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) user_id = user_ref['id'] except exception.UserNotFound: raise exception.Unauthorized() # more compat tenant_id = auth.get('tenantId', None) if tenant_name: try: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = tenant_ref['id'] except exception.TenantNotFound: raise exception.Unauthorized() try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() # If the tenant is disabled don't allow them to authenticate if tenant_ref and not tenant_ref.get('enabled', True): LOG.warning('Tenant %s is disabled' % tenant_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) auth_token_data = dict(zip(['user', 'tenant', 'metadata'], auth_info)) expiry = self.token_api._get_default_expire_time(context=context) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: old_token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') try: old_token_ref = self.token_api.get_token(context=context, token_id=old_token) except exception.NotFound: LOG.warning("Token not found: " + str(old_token)) raise exception.Unauthorized() user_ref = old_token_ref['user'] user_id = user_ref['id'] current_user_ref = 
self.identity_api.get_user(context=context, user_id=user_id) # If the user is disabled don't allow them to authenticate if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) tenants = self.identity_api.get_tenants_for_user(context, user_id) if tenant_id: if not tenant_id in tenants: LOG.warning('User %s is authorized for tenant %s' % (user_id, tenant_id)) raise exception.Unauthorized() expiry = old_token_ref['expires'] try: tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) except exception.TenantNotFound: tenant_ref = None metadata_ref = {} catalog_ref = {} except exception.MetadataNotFound: metadata_ref = {} catalog_ref = {} # If the tenant is disabled don't allow them to authenticate if tenant_ref and not tenant_ref.get('enabled', True): LOG.warning('Tenant %s is disabled' % tenant_id) raise exception.Unauthorized() if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) auth_token_data = dict(dict(user=current_user_ref, tenant=tenant_ref, metadata=metadata_ref)) auth_token_data['expires'] = expiry auth_token_data['id'] = 'placeholder' roles_ref = [] for role_id in metadata_ref.get('roles', []): role_ref = self.identity_api.get_role(context, role_id) roles_ref.append(dict(name=role_ref['name'])) token_data = self._format_token(auth_token_data, roles_ref) service_catalog = self._format_catalog(catalog_ref) token_data['access']['serviceCatalog'] = service_catalog if config.CONF.signing.token_format == 'UUID': token_id = uuid.uuid4().hex elif config.CONF.signing.token_format == 
'PKI': token_id = cms.cms_sign_token(json.dumps(token_data), config.CONF.signing.certfile, config.CONF.signing.keyfile) else: raise exception.UnexpectedError( 'Invalid value for token_format: %s.' ' Allowed values are PKI or UUID.' % config.CONF.signing.token_format) try: self.token_api.create_token( context, token_id, dict(key=token_id, id=token_id, expires=auth_token_data['expires'], user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) except Exception as e: # an identical token may have been created already. # if so, return the token_data as it is also identical try: self.token_api.get_token(context=context, token_id=token_id) except exception.TokenNotFound: raise e token_data['access']['token']['id'] = token_id return token_data def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. """ # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) if cms.is_ans1_token(token_id): data = json.loads(cms.cms_verify(cms.token_to_cms(token_id), config.CONF.signing.certfile, config.CONF.signing.ca_certs)) data['access']['token']['user'] = data['access']['user'] data['access']['token']['metadata'] = data['access']['metadata'] if belongs_to: assert data['access']['token']['tenant']['id'] == belongs_to token_ref = data['access']['token'] else: token_ref = self.token_api.get_token(context=context, token_id=token_id) return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get('belongsTo') assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. 
Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get('belongsTo') token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if possible # This is needed for on-behalf-of requests catalog_ref = None if token_ref.get('tenant'): catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def revocation_list(self, context, auth=None): self.assert_admin(context) tokens = self.token_api.list_revoked_tokens(context) for t in tokens: expires = t['expires'] if not (expires and isinstance(expires, unicode)): t['expires'] = timeutils.isotime(expires) data = {'revoked': tokens} json_data = json.dumps(data) signed_text = cms.cms_sign_text(json_data, config.CONF.signing.certfile, config.CONF.signing.keyfile) return {'signed': signed_text} def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" self.assert_admin(context) token_ref = self._get_token_ref(context, token_id) catalog_ref = None if token_ref.get('tenant'): catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=token_ref['metadata']) return self._format_endpoint_list(catalog_ref) def _format_authenticate(self, token_ref, 
roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: if not isinstance(expires, unicode): expires = timeutils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) if metadata_ref: if 'is_admin' in metadata_ref: o['access']['metadata'] = {'is_admin': metadata_ref['is_admin']} else: o['access']['metadata'] = {'is_admin': 0} if 'roles' in metadata_ref: o['access']['metadata']['roles'] = metadata_ref['roles'] return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() def _format_endpoint_list(self, catalog_ref): """Formats a list of endpoints according to Identity API v2. The v2.0 API wants an endpoint list to look like:: { 'endpoints': [ { 'id': $endpoint_id, 'name': $SERVICE[name], 'type': $SERVICE, 'tenantId': $tenant_id, 'region': $REGION, } ], 'endpoints_links': [], } """ if not catalog_ref: return {} endpoints = [] for region_name, region_ref in catalog_ref.iteritems(): for service_type, service_ref in region_ref.iteritems(): endpoints.append({ 'id': service_ref.get('id'), 'name': service_ref.get('name'), 'type': service_type, 'region': region_name, 'publicURL': service_ref.get('publicURL'), 'internalURL': service_ref.get('internalURL'), 'adminURL': service_ref.get('adminURL'), }) return {'endpoints': endpoints, 'endpoints_links': []} class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class 
AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': 'https://github.com/openstack/identity-api', } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-255/py/good_3789_0
crossvul-python_data_good_743_1
"""Parse (absolute and relative) URLs. urlparse module is based upon the following RFC specifications. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding and L. Masinter, January 2005. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter and L.Masinter, December 1999. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. Berners-Lee, R. Fielding, and L. Masinter, August 1998. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 1995. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. McCahill, December 1994 RFC 3986 is considered the current standard and any future changes to urlparse module should conform with it. The urlparse module is currently not entirely compliant with this RFC due to defacto scenarios for parsing, and for backward compatibility purposes, some parsing quirks from older RFCs are retained. The testcases in test_urlparse.py provides a good indicator of parsing behavior. """ import re __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"] # A classification of schemes ('' means apply by default) uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap', 'wais', 'file', 'https', 'shttp', 'mms', 'prospero', 'rtsp', 'rtspu', '', 'sftp', 'svn', 'svn+ssh'] uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '', 'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh'] uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap', 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 'mms', '', 'sftp', 'tel'] # These are not actually used anymore, but should stay for backwards # compatibility. (They are undocumented, but have a public-looking name.) 
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms', 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', ''] uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais', 'https', 'shttp', 'snews', 'file', 'prospero', ''] # Characters valid in scheme names scheme_chars = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789' '+-.') MAX_CACHE_SIZE = 20 _parse_cache = {} def clear_cache(): """Clear the parse cache.""" _parse_cache.clear() class ResultMixin(object): """Shared methods for the parsed result objects.""" @property def username(self): netloc = self.netloc if "@" in netloc: userinfo = netloc.rsplit("@", 1)[0] if ":" in userinfo: userinfo = userinfo.split(":", 1)[0] return userinfo return None @property def password(self): netloc = self.netloc if "@" in netloc: userinfo = netloc.rsplit("@", 1)[0] if ":" in userinfo: return userinfo.split(":", 1)[1] return None @property def hostname(self): netloc = self.netloc.split('@')[-1] if '[' in netloc and ']' in netloc: return netloc.split(']')[0][1:].lower() elif ':' in netloc: return netloc.split(':')[0].lower() elif netloc == '': return None else: return netloc.lower() @property def port(self): netloc = self.netloc.split('@')[-1].split(']')[-1] if ':' in netloc: port = netloc.split(':')[1] if port: port = int(port, 10) # verify legal port if (0 <= port <= 65535): return port return None from collections import namedtuple class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin): __slots__ = () def geturl(self): return urlunparse(self) def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: 
(scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" tuple = urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = tuple if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' return ParseResult(scheme, netloc, url, params, query, fragment) def _splitparams(url): if '/' in url: i = url.find(';', url.rfind('/')) if i < 0: return url, '' else: i = url.find(';') return url[:i], url[i+1:] def _splitnetloc(url, start=0): delim = len(url) # position of end of domain part of url, default is end for c in '/?#': # look for delimiters; the order is NOT important wdelim = url.find(c, start) # find first of this delim if wdelim >= 0: # if found delim = min(delim, wdelim) # use earliest delim position return url[start:delim], url[delim:] # return (domain, rest) def _checknetloc(netloc): if not netloc or not isinstance(netloc, unicode): return # looking for characters like \u2100 that expand to 'a/c' # IDNA uses NFKC equivalence, so normalize for this check import unicodedata n = netloc.replace(u'@', u'') # ignore characters already included n = n.replace(u':', u'') # but not the surrounding text n = n.replace(u'#', u'') n = n.replace(u'?', u'') netloc2 = unicodedata.normalize('NFKC', n) if n == netloc2: return for c in '/?#@:': if c in netloc2: raise ValueError(u"netloc '" + netloc + u"' contains invalid " + u"characters under NFKC normalization") def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> Return a 5-tuple: (scheme, netloc, path, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
netloc is a single string) and we don't expand % escapes.""" allow_fragments = bool(allow_fragments) key = url, scheme, allow_fragments, type(url), type(scheme) cached = _parse_cache.get(key, None) if cached: return cached if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case scheme = url[:i].lower() url = url[i+1:] if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) _checknetloc(netloc) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return v for c in url[:i]: if c not in scheme_chars: break else: # make sure "url" is not actually a port number (in which case # "scheme" is really part of the path) rest = url[i+1:] if not rest or any(c not in '0123456789' for c in rest): # not a port number scheme, url = url[:i].lower(), rest if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) _checknetloc(netloc) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return v def urlunparse(data): """Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? 
with an empty query (the draft states that these are equivalent).""" scheme, netloc, url, params, query, fragment = data if params: url = "%s;%s" % (url, params) return urlunsplit((scheme, netloc, url, query, fragment)) def urlunsplit(data): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" scheme, netloc, url, query, fragment = data if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if query: url = url + '?' + query if fragment: url = url + '#' + fragment return url def urljoin(base, url, allow_fragments=True): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url if not url: return base bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return url if scheme in uses_netloc: if netloc: return urlunparse((scheme, netloc, path, params, query, fragment)) netloc = bnetloc if path[:1] == '/': return urlunparse((scheme, netloc, path, params, query, fragment)) if not path and not params: path = bpath params = bparams if not query: query = bquery return urlunparse((scheme, netloc, path, params, query, fragment)) segments = bpath.split('/')[:-1] + path.split('/') # XXX The stuff below is bogus in various ways... if segments[-1] == '.': segments[-1] = '' while '.' in segments: segments.remove('.') while 1: i = 1 n = len(segments) - 1 while i < n: if (segments[i] == '..' 
and segments[i-1] not in ('', '..')): del segments[i-1:i+1] break i = i+1 else: break if segments == ['', '..']: segments[-1] = '' elif len(segments) >= 2 and segments[-1] == '..': segments[-2:] = [''] return urlunparse((scheme, netloc, '/'.join(segments), params, query, fragment)) def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) return defrag, frag else: return url, '' try: unicode except NameError: def _is_unicode(x): return 0 else: def _is_unicode(x): return isinstance(x, unicode) # unquote method for parse_qs and parse_qsl # Cannot use directly from urllib as it would create a circular reference # because urllib uses urlparse methods (urljoin). If you update this function, # update it also in urllib. This code duplication does not existin in Python3. _hexdig = '0123456789ABCDEFabcdef' _hextochr = dict((a+b, chr(int(a+b,16))) for a in _hexdig for b in _hexdig) _asciire = re.compile('([\x00-\x7f]+)') def unquote(s): """unquote('abc%20def') -> 'abc def'.""" if _is_unicode(s): if '%' not in s: return s bits = _asciire.split(s) res = [bits[0]] append = res.append for i in range(1, len(bits), 2): append(unquote(str(bits[i])).decode('latin1')) append(bits[i + 1]) return ''.join(res) bits = s.split('%') # fastpath if len(bits) == 1: return s res = [bits[0]] append = res.append for item in bits[1:]: try: append(_hextochr[item[:2]]) append(item[2:]) except KeyError: append('%') append(item) return ''.join(res) def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. 
A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. max_num_fields: int. If set, then throws a ValueError if there are more than n fields read by parse_qsl(). """ dict = {} for name, value in parse_qsl(qs, keep_blank_values, strict_parsing, max_num_fields): if name in dict: dict[name].append(value) else: dict[name] = [value] return dict def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. max_num_fields: int. If set, then throws a ValueError if there are more than n fields read by parse_qsl(). Returns a list, as G-d intended. """ # If max_num_fields is defined then check that the number of fields # is less than max_num_fields. This prevents a memory exhaustion DOS # attack via post bodies with many fields. 
if max_num_fields is not None: num_fields = 1 + qs.count('&') + qs.count(';') if max_num_fields < num_fields: raise ValueError('Max number of fields exceeded') pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError, "bad query field: %r" % (name_value,) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = unquote(nv[0].replace('+', ' ')) value = unquote(nv[1].replace('+', ' ')) r.append((name, value)) return r
./CrossVul/dataset_final_sorted/CWE-255/py/good_743_1
crossvul-python_data_good_741_1
"""Parse (absolute and relative) URLs. urlparse module is based upon the following RFC specifications. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding and L. Masinter, January 2005. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter and L.Masinter, December 1999. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. Berners-Lee, R. Fielding, and L. Masinter, August 1998. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 1995. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. McCahill, December 1994 RFC 3986 is considered the current standard and any future changes to urlparse module should conform with it. The urlparse module is currently not entirely compliant with this RFC due to defacto scenarios for parsing, and for backward compatibility purposes, some parsing quirks from older RFCs are retained. The testcases in test_urlparse.py provides a good indicator of parsing behavior. """ import re import sys import collections __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", "urlsplit", "urlunsplit", "urlencode", "parse_qs", "parse_qsl", "quote", "quote_plus", "quote_from_bytes", "unquote", "unquote_plus", "unquote_to_bytes", "DefragResult", "ParseResult", "SplitResult", "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] # A classification of schemes. # The empty string classifies URLs with no scheme specified, # being the default value returned by “urlsplit” and “urlparse”. 
uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', 'wais', 'file', 'https', 'shttp', 'mms', 'prospero', 'rtsp', 'rtspu', 'sftp', 'svn', 'svn+ssh', 'ws', 'wss'] uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', 'ws', 'wss'] uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 'mms', 'sftp', 'tel'] # These are not actually used anymore, but should stay for backwards # compatibility. (They are undocumented, but have a public-looking name.) non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', 'gopher', 'rtsp', 'rtspu', 'sip', 'sips'] uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais', 'https', 'shttp', 'snews', 'file', 'prospero'] # Characters valid in scheme names scheme_chars = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789' '+-.') # XXX: Consider replacing with functools.lru_cache MAX_CACHE_SIZE = 20 _parse_cache = {} def clear_cache(): """Clear the parse cache and the quoters cache.""" _parse_cache.clear() _safe_quoters.clear() # Helpers for bytes handling # For 3.2, we deliberately require applications that # handle improperly quoted URLs to do their own # decoding and encoding. 
If valid use cases are # presented, we may relax this by using latin-1 # decoding internally for 3.3 _implicit_encoding = 'ascii' _implicit_errors = 'strict' def _noop(obj): return obj def _encode_result(obj, encoding=_implicit_encoding, errors=_implicit_errors): return obj.encode(encoding, errors) def _decode_args(args, encoding=_implicit_encoding, errors=_implicit_errors): return tuple(x.decode(encoding, errors) if x else '' for x in args) def _coerce_args(*args): # Invokes decode if necessary to create str args # and returns the coerced inputs along with # an appropriate result coercion function # - noop for str inputs # - encoding function otherwise str_input = isinstance(args[0], str) for arg in args[1:]: # We special-case the empty string to support the # "scheme=''" default argument to some functions if arg and isinstance(arg, str) != str_input: raise TypeError("Cannot mix str and non-str arguments") if str_input: return args + (_noop,) return _decode_args(args) + (_encode_result,) # Result objects are more helpful than simple tuples class _ResultMixinStr(object): """Standard approach to encoding parsed results from str to bytes""" __slots__ = () def encode(self, encoding='ascii', errors='strict'): return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) class _ResultMixinBytes(object): """Standard approach to decoding parsed results from bytes to str""" __slots__ = () def decode(self, encoding='ascii', errors='strict'): return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) class _NetlocResultMixinBase(object): """Shared methods for the parsed result objects containing a netloc element""" __slots__ = () @property def username(self): return self._userinfo[0] @property def password(self): return self._userinfo[1] @property def hostname(self): hostname = self._hostinfo[0] if not hostname: return None # Scoped IPv6 address may have zone info, which must not be lowercased # like 
http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys separator = '%' if isinstance(hostname, str) else b'%' hostname, percent, zone = hostname.partition(separator) return hostname.lower() + percent + zone @property def port(self): port = self._hostinfo[1] if port is not None: port = int(port, 10) if not ( 0 <= port <= 65535): raise ValueError("Port out of range 0-65535") return port class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition('@') if have_info: username, have_password, password = userinfo.partition(':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition('@') _, have_open_br, bracketed = hostinfo.partition('[') if have_open_br: hostname, _, port = bracketed.partition(']') _, _, port = port.partition(':') else: hostname, _, port = hostinfo.partition(':') if not port: port = None return hostname, port class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition(b'@') if have_info: username, have_password, password = userinfo.partition(b':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition(b'@') _, have_open_br, bracketed = hostinfo.partition(b'[') if have_open_br: hostname, _, port = bracketed.partition(b']') _, _, port = port.partition(b':') else: hostname, _, port = hostinfo.partition(b':') if not port: port = None return hostname, port from collections import namedtuple _DefragResultBase = namedtuple('DefragResult', 'url fragment') _SplitResultBase = namedtuple( 'SplitResult', 'scheme netloc path query fragment') _ParseResultBase = 
namedtuple( 'ParseResult', 'scheme netloc path params query fragment') _DefragResultBase.__doc__ = """ DefragResult(url, fragment) A 2-tuple that contains the url without fragment identifier and the fragment identifier as a separate argument. """ _DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" _DefragResultBase.fragment.__doc__ = """ Fragment identifier separated from URL, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _SplitResultBase.__doc__ = """ SplitResult(scheme, netloc, path, query, fragment) A 5-tuple that contains the different components of a URL. Similar to ParseResult, but does not split params. """ _SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" _SplitResultBase.netloc.__doc__ = """ Network location where the request is made to. """ _SplitResultBase.path.__doc__ = """ The hierarchical path, such as the path to a file to download. """ _SplitResultBase.query.__doc__ = """ The query component, that contains non-hierarchical data, that along with data in path component, identifies a resource in the scope of URI's scheme and network location. """ _SplitResultBase.fragment.__doc__ = """ Fragment identifier, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _ParseResultBase.__doc__ = """ ParseResult(scheme, netloc, path, params, query, fragment) A 6-tuple that contains components of a parsed URL. """ _ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ _ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ _ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ _ParseResultBase.params.__doc__ = """ Parameters for last path element used to dereference the URI in order to provide access to perform some operation on the resource. 
""" _ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ _ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ # For backwards compatibility, alias _NetlocResultMixinStr # ResultBase is no longer part of the documented API, but it is # retained since deprecating it isn't worth the hassle ResultBase = _NetlocResultMixinStr # Structured result objects for string data class DefragResult(_DefragResultBase, _ResultMixinStr): __slots__ = () def geturl(self): if self.fragment: return self.url + '#' + self.fragment else: return self.url class SplitResult(_SplitResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResult(_ParseResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunparse(self) # Structured result objects for bytes data class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): __slots__ = () def geturl(self): if self.fragment: return self.url + b'#' + self.fragment else: return self.url class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunparse(self) # Set up the encode/decode result pairs def _fix_result_transcoding(): _result_pairs = ( (DefragResult, DefragResultBytes), (SplitResult, SplitResultBytes), (ParseResult, ParseResultBytes), ) for _decoded, _encoded in _result_pairs: _decoded._encoded_counterpart = _encoded _encoded._decoded_counterpart = _decoded _fix_result_transcoding() del _fix_result_transcoding def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    split = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = split
    # Only a handful of schemes use ;-params on the last path segment.
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    parsed = ParseResult(scheme, netloc, url, params, query, fragment)
    return _coerce_result(parsed)

def _splitparams(url):
    # Params belong only to the final path segment, so when the path has a
    # '/' the ';' must come after the last one to count.
    if '/' in url:
        semi = url.find(';', url.rfind('/'))
        if semi < 0:
            return url, ''
    else:
        semi = url.find(';')
    return url[:semi], url[semi + 1:]

def _splitnetloc(url, start=0):
    # End of the authority is the earliest of '/', '?' or '#', or end of
    # string when none of them appears.
    end = len(url)
    for ch in '/?#':   # look for delimiters; the order is NOT important
        pos = url.find(ch, start)
        if pos >= 0:
            end = min(end, pos)
    return url[start:end], url[end:]   # (domain, rest)

def _checknetloc(netloc):
    # Pure-ASCII authorities cannot hide delimiters behind normalization.
    if not netloc or netloc.isascii():
        return
    # looking for characters like \u2100 that expand to 'a/c'
    # IDNA uses NFKC equivalence, so normalize for this check
    import unicodedata
    stripped = netloc.replace('@', '')  # ignore characters already included
    stripped = stripped.replace(':', '')  # but not the surrounding text
    stripped = stripped.replace('#', '')
    stripped = stripped.replace('?', '')
    normalized = unicodedata.normalize('NFKC', stripped)
    if stripped == normalized:
        return
    for ch in '/?#@:':
        if ch in normalized:
            raise ValueError("netloc '" + netloc + "' contains invalid " +
                             "characters under NFKC normalization")

def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    key = url, scheme, allow_fragments, type(url), type(scheme)
    hit = _parse_cache.get(key, None)
    if hit:
        return _coerce_result(hit)
    if len(_parse_cache) >= MAX_CACHE_SIZE:  # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    colon = url.find(':')
    if colon > 0:
        if url[:colon] == 'http':  # optimize the common case
            url = url[colon + 1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            _checknetloc(netloc)
            result = SplitResult('http', netloc, url, query, fragment)
            _parse_cache[key] = result
            return _coerce_result(result)
        for ch in url[:colon]:
            if ch not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[colon + 1:]
            if not rest or any(ch not in '0123456789' for ch in rest):
                # not a port number
                scheme, url = url[:colon].lower(), rest
    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    _checknetloc(netloc)
    result = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = result
    return _coerce_result(result)
with an empty query (the draft states that these are equivalent).""" scheme, netloc, url, params, query, fragment, _coerce_result = ( _coerce_args(*components)) if params: url = "%s;%s" % (url, params) return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) def urlunsplit(components): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" scheme, netloc, url, query, fragment, _coerce_result = ( _coerce_args(*components)) if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if query: url = url + '?' + query if fragment: url = url + '#' + fragment return _coerce_result(url) def urljoin(base, url, allow_fragments=True): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url if not url: return base base, url, _coerce_result = _coerce_args(base, url) bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return _coerce_result(url) if scheme in uses_netloc: if netloc: return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) netloc = bnetloc if not path and not params: path = bpath params = bparams if not query: query = bquery return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) base_parts = bpath.split('/') if base_parts[-1] != '': # the last item is not a directory, so will not be taken into account # in resolving the relative path 
del base_parts[-1] # for rfc3986, ignore all base path should the first character be root. if path[:1] == '/': segments = path.split('/') else: segments = base_parts + path.split('/') # filter out elements that would cause redundant slashes on re-joining # the resolved_path segments[1:-1] = filter(None, segments[1:-1]) resolved_path = [] for seg in segments: if seg == '..': try: resolved_path.pop() except IndexError: # ignore any .. segments that would otherwise cause an IndexError # when popped from resolved_path if resolving for rfc3986 pass elif seg == '.': continue else: resolved_path.append(seg) if segments[-1] in ('.', '..'): # do some post-processing here. if the last segment was a relative dir, # then we need to append the trailing '/' resolved_path.append('') return _coerce_result(urlunparse((scheme, netloc, '/'.join( resolved_path) or '/', params, query, fragment))) def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ url, _coerce_result = _coerce_args(url) if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) else: frag = '' defrag = url return _coerce_result(DefragResult(defrag, frag)) _hexdig = '0123456789ABCDEFabcdef' _hextobyte = None def unquote_to_bytes(string): """unquote_to_bytes('abc%20def') -> b'abc def'.""" # Note: strings are encoded as UTF-8. This is only an issue if it contains # unescaped non-ASCII characters, which URIs should not. if not string: # Is it a string-like object? 
string.split return b'' if isinstance(string, str): string = string.encode('utf-8') bits = string.split(b'%') if len(bits) == 1: return string res = [bits[0]] append = res.append # Delay the initialization of the table to not waste memory # if the function is never called global _hextobyte if _hextobyte is None: _hextobyte = {(a + b).encode(): bytes.fromhex(a + b) for a in _hexdig for b in _hexdig} for item in bits[1:]: try: append(_hextobyte[item[:2]]) append(item[2:]) except KeyError: append(b'%') append(item) return b''.join(res) _asciire = re.compile('([\x00-\x7f]+)') def unquote(string, encoding='utf-8', errors='replace'): """Replace %xx escapes by their single-character equivalent. The optional encoding and errors parameters specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. By default, percent-encoded sequences are decoded with UTF-8, and invalid sequences are replaced by a placeholder character. unquote('abc%20def') -> 'abc def'. """ if '%' not in string: string.split return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' bits = _asciire.split(string) res = [bits[0]] append = res.append for i in range(1, len(bits), 2): append(unquote_to_bytes(bits[i]).decode(encoding, errors)) append(bits[i + 1]) return ''.join(res) def parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. 
If true, errors raise a ValueError exception. encoding and errors: specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. max_num_fields: int. If set, then throws a ValueError if there are more than n fields read by parse_qsl(). Returns a dictionary. """ parsed_result = {} pairs = parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors, max_num_fields=max_num_fields) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. encoding and errors: specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. max_num_fields: int. If set, then throws a ValueError if there are more than n fields read by parse_qsl(). Returns a list, as G-d intended. """ qs, _coerce_result = _coerce_args(qs) # If max_num_fields is defined then check that the number of fields # is less than max_num_fields. This prevents a memory exhaustion DOS # attack via post bodies with many fields. 
if max_num_fields is not None: num_fields = 1 + qs.count('&') + qs.count(';') if max_num_fields < num_fields: raise ValueError('Max number of fields exceeded') pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def unquote_plus(string, encoding='utf-8', errors='replace'): """Like unquote(), but also replace plus signs by spaces, as required for unquoting HTML form values. unquote_plus('%7e/abc+def') -> '~/abc def' """ string = string.replace('+', ' ') return unquote(string, encoding, errors) _ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' b'abcdefghijklmnopqrstuvwxyz' b'0123456789' b'_.-~') _ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE) _safe_quoters = {} class Quoter(collections.defaultdict): """A mapping from bytes (in range(0,256)) to strings. String values are percent-encoded byte values, unless the key < 128, and in the "safe" set (either the specified safe set, or default set). """ # Keeps a cache internally, using defaultdict, for efficiency (lookups # of cached keys don't call Python code at all). def __init__(self, safe): """safe: bytes object.""" self.safe = _ALWAYS_SAFE.union(safe) def __repr__(self): # Without this, will just display as a defaultdict return "<%s %r>" % (self.__class__.__name__, dict(self)) def __missing__(self, b): # Handle a cache miss. Store quoted string in cache and return. 
res = chr(b) if b in self.safe else '%{:02X}'.format(b) self[b] = res return res def quote(string, safe='/', encoding=None, errors=None): """quote('abc def') -> 'abc%20def' Each part of a URL, e.g. the path info, the query, etc., has a different set of reserved characters that must be quoted. The quote function offers a cautious (not minimal) way to quote a string for most of these parts. RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists the following (un)reserved characters. unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" reserved = gen-delims / sub-delims gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" Each of the reserved characters is reserved in some component of a URL, but not necessarily in all of them. The quote function %-escapes all characters that are neither in the unreserved chars ("always safe") nor the additional chars set via the safe arg. The default for the safe arg is '/'. The character is reserved, but in typical usage the quote function is being called on a path where the existing slash characters are to be preserved. Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. Now, "~" is included in the set of unreserved characters. string and safe may be either str or bytes objects. encoding and errors must not be specified if string is a bytes object. The optional encoding and errors parameters specify how to deal with non-ASCII characters, as accepted by the str.encode method. By default, encoding='utf-8' (characters are encoded with UTF-8), and errors='strict' (unsupported characters raise a UnicodeEncodeError). 
""" if isinstance(string, str): if not string: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'strict' string = string.encode(encoding, errors) else: if encoding is not None: raise TypeError("quote() doesn't support 'encoding' for bytes") if errors is not None: raise TypeError("quote() doesn't support 'errors' for bytes") return quote_from_bytes(string, safe) def quote_plus(string, safe='', encoding=None, errors=None): """Like quote(), but also replace ' ' with '+', as required for quoting HTML form values. Plus signs in the original string are escaped unless they are included in safe. It also does not have safe default to '/'. """ # Check if ' ' in string, where string may either be a str or bytes. If # there are no spaces, the regular quote will produce the right answer. if ((isinstance(string, str) and ' ' not in string) or (isinstance(string, bytes) and b' ' not in string)): return quote(string, safe, encoding, errors) if isinstance(safe, str): space = ' ' else: space = b' ' string = quote(string, safe + space, encoding, errors) return string.replace(' ', '+') def quote_from_bytes(bs, safe='/'): """Like quote(), but accepts a bytes object rather than a str, and does not perform string-to-bytes encoding. It always returns an ASCII string. 
quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' """ if not isinstance(bs, (bytes, bytearray)): raise TypeError("quote_from_bytes() expected bytes") if not bs: return '' if isinstance(safe, str): # Normalize 'safe' by converting to bytes and removing non-ASCII chars safe = safe.encode('ascii', 'ignore') else: safe = bytes([c for c in safe if c < 128]) if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): return bs.decode() try: quoter = _safe_quoters[safe] except KeyError: _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ return ''.join([quoter(char) for char in bs]) def urlencode(query, doseq=False, safe='', encoding=None, errors=None, quote_via=quote_plus): """Encode a dict or sequence of two-element tuples into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. The components of a query arg may each be either a string or a bytes type. The safe, encoding, and errors parameters are passed down to the function specified by quote_via (encoding and errors only if a component is a str). """ if hasattr(query, "items"): query = query.items() else: # It's a bother at times that strings and string-like objects are # sequences. try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and not isinstance(query[0], tuple): raise TypeError # Zero-length sequences of all types will get here and succeed, # but that's a minor nit. 
Since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: ty, va, tb = sys.exc_info() raise TypeError("not a valid non-string sequence " "or mapping object").with_traceback(tb) l = [] if not doseq: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) else: v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) l.append(k + '=' + v) elif isinstance(v, str): v = quote_via(v, safe, encoding, errors) l.append(k + '=' + v) else: try: # Is this a sufficient test for sequence-ness? x = len(v) except TypeError: # not a sequence v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: # loop over the sequence for elt in v: if isinstance(elt, bytes): elt = quote_via(elt, safe) else: elt = quote_via(str(elt), safe, encoding, errors) l.append(k + '=' + elt) return '&'.join(l) def to_bytes(url): """to_bytes(u"URL") --> 'URL'.""" # Most URL schemes require ASCII. If that changes, the conversion # can be relaxed. 
# XXX get rid of to_bytes() if isinstance(url, str): try: url = url.encode("ASCII").decode() except UnicodeError: raise UnicodeError("URL " + repr(url) + " contains non-ASCII characters") return url def unwrap(url): """unwrap('<URL:type://host/path>') --> 'type://host/path'.""" url = str(url).strip() if url[:1] == '<' and url[-1:] == '>': url = url[1:-1].strip() if url[:4] == 'URL:': url = url[4:].strip() return url _typeprog = None def splittype(url): """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" global _typeprog if _typeprog is None: _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) match = _typeprog.match(url) if match: scheme, data = match.groups() return scheme.lower(), data return None, url _hostprog = None def splithost(url): """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" global _hostprog if _hostprog is None: _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL) match = _hostprog.match(url) if match: host_port, path = match.groups() if path and path[0] != '/': path = '/' + path return host_port, path return None, url def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" user, delim, host = host.rpartition('@') return (user if delim else None), host def splitpasswd(user): """splitpasswd('user:passwd') -> 'user', 'passwd'.""" user, delim, passwd = user.partition(':') return user, (passwd if delim else None) # splittag('/path#tag') --> '/path', 'tag' _portprog = None def splitport(host): """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: _portprog = re.compile('(.*):([0-9]*)$', re.DOTALL) match = _portprog.match(host) if match: host, port = match.groups() if port: return host, port return host, None def splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number are found after ':'. 
Return None if ':' but not a valid number.""" host, delim, port = host.rpartition(':') if not delim: host = port elif port: try: nport = int(port) except ValueError: nport = None return host, nport return host, defport def splitquery(url): """splitquery('/path?query') --> '/path', 'query'.""" path, delim, query = url.rpartition('?') if delim: return path, query return url, None def splittag(url): """splittag('/path#tag') --> '/path', 'tag'.""" path, delim, tag = url.rpartition('#') if delim: return path, tag return url, None def splitattr(url): """splitattr('/path;attr1=value1;attr2=value2;...') -> '/path', ['attr1=value1', 'attr2=value2', ...].""" words = url.split(';') return words[0], words[1:] def splitvalue(attr): """splitvalue('attr=value') --> 'attr', 'value'.""" attr, delim, value = attr.partition('=') return attr, (value if delim else None)
./CrossVul/dataset_final_sorted/CWE-255/py/good_741_1
crossvul-python_data_bad_3791_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the EC2 Credentials service.

This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.

A user can create as many access/secret pairs, each of which map to a
specific tenant.  This is required because OpenStack supports a user
belonging to multiple tenants, whereas the signatures created on ec2-style
requests don't allow specification of which tenant the user wishes to act
upon.

To complete the cycle, we provide a method that OpenStack services can
use to validate a signature and get a corresponding openstack token.  This
token allows method calls to other services within the context the
access/secret was created.  As an example, nova requests keystone to validate
the signature of a request, receives a token, and then makes a request to
glance to list images needed to perform the requested task.

"""

import uuid

from keystone import catalog
from keystone.common import manager
from keystone.common import utils
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import service
from keystone import token


CONF = config.CONF


class Manager(manager.Manager):
    """Default pivot point for the EC2 Credentials backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        # CONF.ec2.driver names the backend class to load dynamically.
        super(Manager, self).__init__(CONF.ec2.driver)


class Ec2Extension(wsgi.ExtensionRouter):
    def add_routes(self, mapper):
        """Wire up the EC2 token-validation and credential CRUD routes."""
        ec2_controller = Ec2Controller()
        # validation
        mapper.connect(
            '/ec2tokens',
            controller=ec2_controller,
            action='authenticate',
            conditions=dict(method=['POST']))

        # crud
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2',
            controller=ec2_controller,
            action='create_credential',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2',
            controller=ec2_controller,
            action='get_credentials',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2/{credential_id}',
            controller=ec2_controller,
            action='get_credential',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/users/{user_id}/credentials/OS-EC2/{credential_id}',
            controller=ec2_controller,
            action='delete_credential',
            conditions=dict(method=['DELETE']))


class Ec2Controller(wsgi.Application):
    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        self.ec2_api = Manager()
        super(Ec2Controller, self).__init__()

    def check_signature(self, creds_ref, credentials):
        """Verify the EC2 signature on a request.

        :param creds_ref: stored credential dict (provides the secret key)
        :param credentials: signed request dict (must carry 'signature'
                            and 'host')
        :raises exception.Unauthorized: if the signature does not match
        """
        signer = utils.Ec2Signer(creds_ref['secret'])
        signature = signer.generate(credentials)
        # Constant-time comparison to avoid leaking signature bytes
        # through timing differences.
        if utils.auth_str_equal(credentials['signature'], signature):
            return
        # NOTE(vish): Some libraries don't use the port when signing
        #             requests, so try again without port.
        # The fallback is keyed on the *host* containing a port; testing
        # the signature for ':' (as previously written) was wrong and
        # could raise ValueError from the split below.
        elif ':' in credentials['host']:
            hostname, _port = credentials['host'].split(':')
            credentials['host'] = hostname
            signature = signer.generate(credentials)
            # credentials is a dict; subscript access (not attribute
            # access) is required here.
            if not utils.auth_str_equal(credentials['signature'], signature):
                raise exception.Unauthorized(message='Invalid EC2 signature.')
        else:
            raise exception.Unauthorized(message='EC2 signature not '
                                                 'supplied.')

    def authenticate(self, context, credentials=None, ec2Credentials=None):
        """Validate a signed EC2 request and provide a token.

        Other services (such as Nova) use this **admin** call to determine
        if a request they signed received is from a valid user.

        If it is a valid signature, an openstack token that maps
        to the user/tenant is returned to the caller, along with
        all the other details returned from a normal token validation
        call.

        The returned token is useful for making calls to other
        OpenStack services within the context of the request.

        :param context: standard context
        :param credentials: dict of ec2 signature
        :param ec2Credentials: DEPRECATED dict of ec2 signature
        :returns: token: openstack token equivalent to access key along
                         with the corresponding service catalog and roles
        """

        # FIXME(ja): validate that a service token was used!

        # NOTE(termie): backwards compat hack
        if not credentials and ec2Credentials:
            credentials = ec2Credentials

        if 'access' not in credentials:
            raise exception.Unauthorized(message='EC2 signature not '
                                                 'supplied.')

        creds_ref = self._get_credentials(context,
                                          credentials['access'])
        self.check_signature(creds_ref, credentials)

        # TODO(termie): don't create new tokens every time
        # TODO(termie): this is copied from TokenController.authenticate
        token_id = uuid.uuid4().hex
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=creds_ref['tenant_id'])
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=creds_ref['user_id'])
        metadata_ref = self.identity_api.get_metadata(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'])
        catalog_ref = self.catalog_api.get_catalog(
            context=context,
            user_id=user_ref['id'],
            tenant_id=tenant_ref['id'],
            metadata=metadata_ref)

        token_ref = self.token_api.create_token(
            context, token_id, dict(id=token_id,
                                    user=user_ref,
                                    tenant=tenant_ref,
                                    metadata=metadata_ref))

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # TODO(termie): make this a util function or something
        # TODO(termie): i don't think the ec2 middleware currently expects a
        #               full return, but it contains a note saying that it
        #               would be better to expect a full return
        token_controller = service.TokenController()
        return token_controller._format_authenticate(
            token_ref, roles_ref, catalog_ref)

    def create_credential(self, context, user_id, tenant_id):
        """Create a secret/access pair for use with ec2 style auth.

        Generates a new set of credentials that map to the user/tenant
        pair.

        :param context: standard context
        :param user_id: id of user
        :param tenant_id: id of tenant
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        self._assert_valid_tenant_id(context, tenant_id)

        cred_ref = {'user_id': user_id,
                    'tenant_id': tenant_id,
                    'access': uuid.uuid4().hex,
                    'secret': uuid.uuid4().hex}
        self.ec2_api.create_credential(context, cred_ref['access'], cred_ref)
        return {'credential': cred_ref}

    def get_credentials(self, context, user_id):
        """List all credentials for a user.

        :param context: standard context
        :param user_id: id of user
        :returns: credentials: list of ec2 credential dicts
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        return {'credentials': self.ec2_api.list_credentials(context,
                                                             user_id)}

    def get_credential(self, context, user_id, credential_id):
        """Retrieve a user's access/secret pair by the access key.

        Grab the full access/secret pair for a given access key.

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(context, user_id)
        creds = self._get_credentials(context, credential_id)
        return {'credential': creds}

    def delete_credential(self, context, user_id, credential_id):
        """Delete a user's access/secret pair.

        Used to revoke a user's access/secret pair

        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: bool: success
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
            self._assert_owner(context, user_id, credential_id)
        self._assert_valid_user_id(context, user_id)

        self._get_credentials(context, credential_id)
        return self.ec2_api.delete_credential(context, credential_id)

    def _get_credentials(self, context, credential_id):
        """Return credentials from an ID.

        :param context: standard context
        :param credential_id: id of credential
        :raises exception.Unauthorized: when credential id is invalid
        :returns: credential: dict of ec2 credential.
        """
        creds = self.ec2_api.get_credential(context,
                                            credential_id)
        if not creds:
            raise exception.Unauthorized(message='EC2 access key not found.')
        return creds

    def _assert_identity(self, context, user_id):
        """Check that the provided token belongs to the user.

        :param context: standard context
        :param user_id: id of user
        :raises exception.Forbidden: when token is invalid

        """
        try:
            token_ref = self.token_api.get_token(
                context=context,
                token_id=context['token_id'])
        except exception.TokenNotFound:
            raise exception.Unauthorized()

        token_user_id = token_ref['user'].get('id')
        if token_user_id != user_id:
            raise exception.Forbidden()

    def _is_admin(self, context):
        """Wrap admin assertion error return statement.

        :param context: standard context
        :returns: bool: success

        """
        try:
            self.assert_admin(context)
            return True
        except exception.Forbidden:
            return False

    def _assert_owner(self, context, user_id, credential_id):
        """Ensure the provided user owns the credential.

        :param context: standard context
        :param user_id: expected credential owner
        :param credential_id: id of credential object
        :raises exception.Forbidden: on failure

        """
        cred_ref = self.ec2_api.get_credential(context, credential_id)
        if user_id != cred_ref['user_id']:
            raise exception.Forbidden()

    def _assert_valid_user_id(self, context, user_id):
        """Ensure a valid user id.

        :param context: standard context
        :param user_id: expected credential owner
        :raises exception.UserNotFound: on failure

        """
        user_ref = self.identity_api.get_user(
            context=context,
            user_id=user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)

    def _assert_valid_tenant_id(self, context, tenant_id):
        """Ensure a valid tenant id.

        :param context: standard context
        :param tenant_id: id of tenant to validate
        :raises exception.TenantNotFound: on failure

        """
        tenant_ref = self.identity_api.get_tenant(
            context=context,
            tenant_id=tenant_id)
        if not tenant_ref:
            raise exception.TenantNotFound(tenant_id=tenant_id)
./CrossVul/dataset_final_sorted/CWE-255/py/bad_3791_0