id | content | max_stars_repo_path |
|---|---|---|
crossvul-python_data_good_2503_0 | import base64
import re
from datetime import datetime
import logging
import ssl
from xml.etree import ElementTree
import iso8601
import six
import recurly
import recurly.errors
from recurly.link_header import parse_link_value
from six.moves import http_client
from six.moves.urllib.parse import urlencode, urlsplit, quote
class Money(object):
"""An amount of money in one or more currencies."""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Money may be single currency or multi-currency but not both")
elif kwargs:
self.currencies = dict(kwargs)
elif args and len(args) > 1:
raise ValueError("Multi-currency Money must be instantiated with codes")
elif args:
self.currencies = { recurly.DEFAULT_CURRENCY: args[0] }
else:
self.currencies = dict()
@classmethod
def from_element(cls, elem):
currency = dict()
for child_el in elem:
if not child_el.tag:
continue
currency[child_el.tag] = int(child_el.text)
return cls(**currency)
def add_to_element(self, elem):
for currency, amount in self.currencies.items():
currency_el = ElementTree.Element(currency)
currency_el.attrib['type'] = 'integer'
currency_el.text = six.text_type(amount)
elem.append(currency_el)
def __getitem__(self, name):
return self.currencies[name]
def __setitem__(self, name, value):
self.currencies[name] = value
def __delitem__(self, name):
del self.currencies[name]
def __contains__(self, name):
return name in self.currencies
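# A minimal usage sketch (illustrative; the currency codes and amounts
# below are hypothetical, with amounts expressed in integer cents):
#
#     >>> price = Money(EUR=1000, USD=1200)   # multi-currency
#     >>> price['USD']
#     1200
#     >>> 'EUR' in price
#     True
#     >>> Money(500)   # single-currency, uses recurly.DEFAULT_CURRENCY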
class PageError(ValueError):
"""An error raised when requesting to continue to a stream page that
doesn't exist.
This error can be raised when requesting the next page for the last page in
a series, or the first page for the first page in a series.
"""
pass
class Page(list):
"""A set of related `Resource` instances retrieved together from
the API.
Use `Page` instances as `list` instances to access their contents.
"""
def __iter__(self):
if not self:
return
page = self
while page:
for x in list.__iter__(page):
yield x
try:
page = page.next_page()
except PageError:
try:
del self.next_url
except AttributeError:
pass
return
def next_page(self):
"""Return the next `Page` after this one in the result sequence
it's from.
If the current page is the last page in the sequence, calling
this method raises a `PageError`.
"""
try:
next_url = self.next_url
except AttributeError:
raise PageError("Page %r has no next page" % self)
return self.page_for_url(next_url)
def first_page(self):
"""Return the first `Page` in the result sequence this `Page`
instance is from.
If the current page is already the first page in the sequence,
calling this method raises a `PageError`.
"""
try:
start_url = self.start_url
except AttributeError:
raise PageError("Page %r is already the first page" % self)
return self.page_for_url(start_url)
@classmethod
def page_for_url(cls, url):
"""Return a new `Page` containing the items at the given
endpoint URL."""
resp, elem = Resource.element_for_url(url)
value = Resource.value_for_element(elem)
return cls.page_for_value(resp, value)
@classmethod
def count_for_url(cls, url):
"""Return the count of server side resources given a url"""
headers = Resource.headers_for_url(url)
return int(headers['X-Records'])
@classmethod
def page_for_value(cls, resp, value):
"""Return a new `Page` representing the given resource `value`
retrieved using the HTTP response `resp`.
This method records pagination ``Link`` headers present in `resp`, so
that the returned `Page` can return their resources from its
`next_page()` and `first_page()` methods.
"""
page = cls(value)
links = parse_link_value(resp.getheader('Link'))
for url, data in six.iteritems(links):
if data.get('rel') == 'start':
page.start_url = url
if data.get('rel') == 'next':
page.next_url = url
return page
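# Illustration (hypothetical header; example.com is not a real endpoint):
# a response carrying
#     Link: <https://api.example.com/v2/accounts?cursor=abc>; rel="next"
# produces a Page whose next_page() fetches that cursor URL, while the
# absence of a rel="start" link leaves first_page() raising PageError.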
class Resource(object):
"""A Recurly API resource.
This superclass implements the general behavior for all the
specific Recurly API resources.
All method parameters and return values that are XML elements are
`xml.etree.ElementTree.Element` instances.
"""
_classes_for_nodename = dict()
sensitive_attributes = ()
"""Attributes that are not logged with the rest of a `Resource`
of this class when submitted in a ``POST`` or ``PUT`` request."""
xml_attribute_attributes = ()
"""Attributes of a `Resource` of this class that are not serialized
as subelements, but rather attributes of the top level element."""
inherits_currency = False
"""Whether a `Resource` of this class inherits a currency from a
parent `Resource`, and therefore should not use `Money` instances
even though this `Resource` class has no ``currency`` attribute of
its own."""
def serializable_attributes(self):
""" Attributes to be serialized in a ``POST`` or ``PUT`` request.
Returns all attributes unless a blacklist is specified
"""
if hasattr(self, 'blacklist_attributes'):
return [attr for attr in self.attributes if attr not in
self.blacklist_attributes]
else:
return self.attributes
def __init__(self, **kwargs):
try:
self.attributes.index('currency') # Test for currency attribute,
self.currency # and test if it's set.
except ValueError:
pass
except AttributeError:
self.currency = recurly.DEFAULT_CURRENCY
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classmethod
def http_request(cls, url, method='GET', body=None, headers=None):
"""Make an HTTP request with the given method to the given URL,
returning the resulting `http_client.HTTPResponse` instance.
If the `body` argument is a `Resource` instance, it is serialized
to XML by calling its `to_element()` method before submitting it.
Requests are authenticated per the Recurly API specification
using the ``recurly.API_KEY`` value for the API key.
Requests and responses are logged at the ``DEBUG`` level to the
``recurly.http.request`` and ``recurly.http.response`` loggers
respectively.
"""
if recurly.API_KEY is None:
raise recurly.UnauthorizedError('recurly.API_KEY not set')
is_non_ascii = lambda s: any(ord(c) >= 128 for c in s)
if is_non_ascii(recurly.API_KEY) or is_non_ascii(recurly.SUBDOMAIN):
raise recurly.ConfigurationError("""Setting API_KEY or SUBDOMAIN to
unicode strings may cause problems. Please use strings.
Issue described here:
https://gist.github.com/maximehardy/d3a0a6427d2b6791b3dc""")
urlparts = urlsplit(url)
connection_options = {}
if recurly.SOCKET_TIMEOUT_SECONDS:
connection_options['timeout'] = recurly.SOCKET_TIMEOUT_SECONDS
if urlparts.scheme != 'https':
connection = http_client.HTTPConnection(urlparts.netloc, **connection_options)
elif recurly.CA_CERTS_FILE is None:
connection = http_client.HTTPSConnection(urlparts.netloc, **connection_options)
else:
connection_options['context'] = ssl.create_default_context(cafile=recurly.CA_CERTS_FILE)
connection = http_client.HTTPSConnection(urlparts.netloc, **connection_options)
headers = {} if headers is None else dict(headers)
headers.setdefault('Accept', 'application/xml')
headers.update({
'User-Agent': recurly.USER_AGENT
})
headers['X-Api-Version'] = recurly.api_version()
headers['Authorization'] = 'Basic %s' % base64.b64encode(six.b('%s:' % recurly.API_KEY)).decode()
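# For illustration, a hypothetical API_KEY of 'abc' yields
#     Authorization: Basic YWJjOg==
# i.e. the key is used as the username of an HTTP Basic pair with an
# empty password (hence the ':' appended before base64-encoding).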
log = logging.getLogger('recurly.http.request')
if log.isEnabledFor(logging.DEBUG):
log.debug("%s %s HTTP/1.1", method, url)
for header, value in six.iteritems(headers):
if header == 'Authorization':
value = '<redacted>'
log.debug("%s: %s", header, value)
log.debug('')
if method in ('POST', 'PUT') and body is not None:
if isinstance(body, Resource):
log.debug(body.as_log_output())
else:
log.debug(body)
if isinstance(body, Resource):
body = ElementTree.tostring(body.to_element(), encoding='UTF-8')
headers['Content-Type'] = 'application/xml; charset=utf-8'
if method in ('POST', 'PUT') and body is None:
headers['Content-Length'] = '0'
connection.request(method, url, body, headers)
resp = connection.getresponse()
resp_headers = cls.headers_as_dict(resp)
log = logging.getLogger('recurly.http.response')
if log.isEnabledFor(logging.DEBUG):
log.debug("HTTP/1.1 %d %s", resp.status, resp.reason)
log.debug(resp_headers)
log.debug('')
recurly.cache_rate_limit_headers(resp_headers)
return resp
@classmethod
def headers_as_dict(cls, resp):
"""Turns an array of response headers into a dictionary"""
if six.PY2:
pairs = [header.split(': ') for header in resp.msg.headers]
return dict([(k, v.strip()) for k, v in pairs])
else:
return dict([(k, v.strip()) for k, v in resp.msg._headers])
def as_log_output(self):
"""Returns an XML string containing a serialization of this
instance suitable for logging.
Attributes named in the instance's `sensitive_attributes` are
redacted.
"""
elem = self.to_element()
for attrname in self.sensitive_attributes:
for sensitive_el in elem.iter(attrname):
sensitive_el.text = 'XXXXXXXXXXXXXXXX'
return ElementTree.tostring(elem, encoding='UTF-8')
@classmethod
def _learn_nodenames(cls, classes):
for resource_class in classes:
try:
rc_is_subclass = issubclass(resource_class, cls)
except TypeError:
continue
if not rc_is_subclass:
continue
nodename = getattr(resource_class, 'nodename', None)
if nodename is None:
continue
cls._classes_for_nodename[nodename] = resource_class
@classmethod
def get(cls, uuid):
"""Return a `Resource` instance of this class identified by
the given code or UUID.
Only `Resource` classes with specified `member_path` attributes
can be directly requested with this method.
"""
uuid = quote(str(uuid))
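# The quote() above ensures a crafted code/UUID containing characters
# such as '/', '?' or '#' cannot rewrite the request path built below,
# hardening against server-side request forgery (CWE-918).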
url = recurly.base_uri() + (cls.member_path % (uuid,))
resp, elem = cls.element_for_url(url)
return cls.from_element(elem)
@classmethod
def headers_for_url(cls, url):
"""Return the headers only for the given URL as a dict"""
response = cls.http_request(url, method='HEAD')
if response.status != 200:
cls.raise_http_error(response)
return Resource.headers_as_dict(response)
@classmethod
def element_for_url(cls, url):
"""Return the resource at the given URL, as a
(`http_client.HTTPResponse`, `xml.etree.ElementTree.Element`) tuple
resulting from a ``GET`` request to that URL."""
response = cls.http_request(url)
if response.status != 200:
cls.raise_http_error(response)
assert response.getheader('Content-Type').startswith('application/xml')
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
response_doc = ElementTree.fromstring(response_xml)
return response, response_doc
@classmethod
def _subclass_for_nodename(cls, nodename):
try:
return cls._classes_for_nodename[nodename]
except KeyError:
raise ValueError("Could not determine resource class for array member with tag %r"
% nodename)
@classmethod
def value_for_element(cls, elem):
"""Deserialize the given XML `Element` into its representative
value.
Depending on the content of the element, the returned value may be:
* a string, integer, or boolean value
* a `datetime.datetime` instance
* a list of `Resource` instances
* a single `Resource` instance
* a `Money` instance
* ``None``
"""
log = logging.getLogger('recurly.resource')
if elem is None:
log.debug("Converting %r element into None value", elem)
return
if elem.attrib.get('nil') is not None:
log.debug("Converting %r element with nil attribute into None value", elem.tag)
return
if elem.tag.endswith('_in_cents') and 'currency' not in cls.attributes and not cls.inherits_currency:
log.debug("Converting %r element in class with no matching 'currency' into a Money value", elem.tag)
return Money.from_element(elem)
attr_type = elem.attrib.get('type')
log.debug("Converting %r element with type %r", elem.tag, attr_type)
if attr_type == 'integer':
return int(elem.text.strip())
if attr_type == 'float':
return float(elem.text.strip())
if attr_type == 'boolean':
return elem.text.strip() == 'true'
if attr_type == 'datetime':
return iso8601.parse_date(elem.text.strip())
if attr_type == 'array':
return [cls._subclass_for_nodename(sub_elem.tag).from_element(sub_elem) for sub_elem in elem]
# Unknown types may be the names of resource classes.
if attr_type is not None:
try:
value_class = cls._subclass_for_nodename(attr_type)
except ValueError:
log.debug("Not converting %r element with type %r to a resource as that matches no known nodename",
elem.tag, attr_type)
else:
return value_class.from_element(elem)
# Untyped complex elements should still be resource instances. Guess from the nodename.
if len(elem): # has children
value_class = cls._subclass_for_nodename(elem.tag)
log.debug("Converting %r tag into a %s", elem.tag, value_class.__name__)
return value_class.from_element(elem)
value = elem.text or ''
return value.strip()
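# Deserialization sketch (hypothetical XML snippets):
#     <amount type="integer">100</amount>       -> 100
#     <tax_rate type="float">0.25</tax_rate>    -> 0.25
#     <active type="boolean">true</active>      -> True
#     <note nil="nil"/>                         -> None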
@classmethod
def element_for_value(cls, attrname, value):
"""Serialize the given value into an XML `Element` with the
given tag name, returning it.
The value argument may be:
* a `Resource` instance
* a `Money` instance
* a `datetime.datetime` instance
* a string, integer, or boolean value
* ``None``
* a list or tuple of these values
"""
if isinstance(value, Resource):
if attrname in cls._classes_for_nodename:
# override the child's node name with this attribute name
return value.to_element(attrname)
return value.to_element()
el = ElementTree.Element(attrname)
if value is None:
el.attrib['nil'] = 'nil'
elif isinstance(value, bool):
el.attrib['type'] = 'boolean'
el.text = 'true' if value else 'false'
elif isinstance(value, int):
el.attrib['type'] = 'integer'
el.text = str(value)
elif isinstance(value, datetime):
el.attrib['type'] = 'datetime'
el.text = value.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(value, list) or isinstance(value, tuple):
for sub_resource in value:
if hasattr(sub_resource, 'to_element'):
el.append(sub_resource.to_element())
else:
el.append(cls.element_for_value(re.sub(r"s$", "", attrname), sub_resource))
elif isinstance(value, Money):
value.add_to_element(el)
else:
el.text = six.text_type(value)
return el
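# Serialization sketch (hypothetical attribute names):
#     element_for_value('quantity', 3)    -> <quantity type="integer">3</quantity>
#     element_for_value('active', True)   -> <active type="boolean">true</active>
#     element_for_value('note', None)     -> <note nil="nil"/>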
@classmethod
def paginated(cls, url):
""" Exposes Page.page_for_url in Resource """
return Page.page_for_url(url)
@classmethod
def from_element(cls, elem):
"""Return a new instance of this `Resource` class representing
the given XML element."""
return cls().update_from_element(elem)
def update_from_element(self, elem):
"""Reset this `Resource` instance to represent the values in
the given XML element."""
self._elem = elem
for attrname in self.attributes:
try:
delattr(self, attrname)
except AttributeError:
pass
document_url = elem.attrib.get('href')
if document_url is not None:
self._url = document_url
return self
def _make_actionator(self, url, method, extra_handler=None):
def actionator(*args, **kwargs):
if kwargs:
full_url = '%s?%s' % (url, urlencode(kwargs))
else:
full_url = url
body = args[0] if args else None
response = self.http_request(full_url, method, body)
if response.status == 200:
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
return self.update_from_element(ElementTree.fromstring(response_xml))
elif response.status == 201:
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
return self.value_for_element(elem)
elif response.status == 204:
pass
elif extra_handler is not None:
return extra_handler(response)
else:
self.raise_http_error(response)
return actionator
# Usually the path is the same as the element name.
def __getpath__(self, name):
return name
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(name)
try:
selfnode = self._elem
except AttributeError:
raise AttributeError(name)
if name in self.xml_attribute_attributes:
try:
return selfnode.attrib[name]
except KeyError:
raise AttributeError(name)
elem = selfnode.find(self.__getpath__(name))
if elem is None:
# It might be an <a name> link.
for anchor_elem in selfnode.findall('a'):
if anchor_elem.attrib.get('name') == name:
url = anchor_elem.attrib['href']
method = anchor_elem.attrib['method'].upper()
return self._make_actionator(url, method)
raise AttributeError(name)
# Follow links.
if 'href' in elem.attrib:
def make_relatitator(url):
def relatitator(**kwargs):
if kwargs:
full_url = '%s?%s' % (url, urlencode(kwargs))
else:
full_url = url
resp, elem = Resource.element_for_url(full_url)
value = Resource.value_for_element(elem)
if isinstance(value, list):
return Page.page_for_value(resp, value)
return value
return relatitator
url = elem.attrib['href']
if url == '':
return Resource.value_for_element(elem)
else:
return make_relatitator(url)
return self.value_for_element(elem)
@classmethod
def all(cls, **kwargs):
"""Return a `Page` of instances of this `Resource` class from
its general collection endpoint.
Only `Resource` classes with specified `collection_path`
endpoints can be requested with this method. Any provided
keyword arguments are passed to the API endpoint as query
parameters.
"""
url = recurly.base_uri() + cls.collection_path
if kwargs:
url = '%s?%s' % (url, urlencode(kwargs))
return Page.page_for_url(url)
@classmethod
def count(cls, **kwargs):
"""Return a count of server side resources given
filtering arguments in kwargs.
"""
url = recurly.base_uri() + cls.collection_path
if kwargs:
url = '%s?%s' % (url, urlencode(kwargs))
return Page.count_for_url(url)
def save(self):
"""Save this `Resource` instance to the service.
If this is a new instance, it is created through a ``POST``
request to its collection endpoint. If this instance already
exists in the service, it is updated through a ``PUT`` request
to its own URL.
"""
if hasattr(self, '_url'):
return self._update()
return self._create()
def _update(self):
return self.put(self._url)
def _create(self):
url = recurly.base_uri() + self.collection_path
return self.post(url)
def put(self, url):
"""Sends this `Resource` instance to the service with a
``PUT`` request to the given URL."""
response = self.http_request(url, 'PUT', self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def post(self, url, body=None):
"""Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body"""
response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status not in (200, 201, 204):
self.raise_http_error(response)
self._url = response.getheader('Location')
if response.status in (200, 201):
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def delete(self):
"""Submits a deletion request for this `Resource` instance as
a ``DELETE`` request to its URL."""
response = self.http_request(self._url, 'DELETE')
if response.status != 204:
self.raise_http_error(response)
@classmethod
def raise_http_error(cls, response):
"""Raise a `ResponseError` of the appropriate subclass in
reaction to the given `http_client.HTTPResponse`."""
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
exc_class = recurly.errors.error_class_for_http_status(response.status)
raise exc_class(response_xml)
def to_element(self, root_name=None):
"""Serialize this `Resource` instance to an XML element."""
if not root_name:
root_name = self.nodename
elem = ElementTree.Element(root_name)
for attrname in self.serializable_attributes():
# Only use values that have been loaded into the internal
# __dict__. For retrieved objects we look into the XML response at
# access time, so the internal __dict__ contains only the elements
# that have been set on the client side.
try:
value = self.__dict__[attrname]
except KeyError:
continue
if attrname in self.xml_attribute_attributes:
elem.attrib[attrname] = six.text_type(value)
else:
sub_elem = self.element_for_value(attrname, value)
elem.append(sub_elem)
return elem
| ./CrossVul/dataset_final_sorted/CWE-918/py/good_2503_0 |
crossvul-python_data_bad_2503_0 | import base64
import re
from datetime import datetime
import logging
import ssl
from xml.etree import ElementTree
import iso8601
import six
import recurly
import recurly.errors
from recurly.link_header import parse_link_value
from six.moves import http_client
from six.moves.urllib.parse import urlencode, urljoin, urlsplit
class Money(object):
"""An amount of money in one or more currencies."""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Money may be single currency or multi-currency but not both")
elif kwargs:
self.currencies = dict(kwargs)
elif args and len(args) > 1:
raise ValueError("Multi-currency Money must be instantiated with codes")
elif args:
self.currencies = { recurly.DEFAULT_CURRENCY: args[0] }
else:
self.currencies = dict()
@classmethod
def from_element(cls, elem):
currency = dict()
for child_el in elem:
if not child_el.tag:
continue
currency[child_el.tag] = int(child_el.text)
return cls(**currency)
def add_to_element(self, elem):
for currency, amount in self.currencies.items():
currency_el = ElementTree.Element(currency)
currency_el.attrib['type'] = 'integer'
currency_el.text = six.text_type(amount)
elem.append(currency_el)
def __getitem__(self, name):
return self.currencies[name]
def __setitem__(self, name, value):
self.currencies[name] = value
def __delitem__(self, name):
del self.currencies[name]
def __contains__(self, name):
return name in self.currencies
class PageError(ValueError):
"""An error raised when requesting to continue to a stream page that
doesn't exist.
This error can be raised when requesting the next page for the last page in
a series, or the first page for the first page in a series.
"""
pass
class Page(list):
"""A set of related `Resource` instances retrieved together from
the API.
Use `Page` instances as `list` instances to access their contents.
"""
def __iter__(self):
if not self:
return
page = self
while page:
for x in list.__iter__(page):
yield x
try:
page = page.next_page()
except PageError:
try:
del self.next_url
except AttributeError:
pass
return
def next_page(self):
"""Return the next `Page` after this one in the result sequence
it's from.
If the current page is the last page in the sequence, calling
this method raises a `PageError`.
"""
try:
next_url = self.next_url
except AttributeError:
raise PageError("Page %r has no next page" % self)
return self.page_for_url(next_url)
def first_page(self):
"""Return the first `Page` in the result sequence this `Page`
instance is from.
If the current page is already the first page in the sequence,
calling this method raises a `PageError`.
"""
try:
start_url = self.start_url
except AttributeError:
raise PageError("Page %r is already the first page" % self)
return self.page_for_url(start_url)
@classmethod
def page_for_url(cls, url):
"""Return a new `Page` containing the items at the given
endpoint URL."""
resp, elem = Resource.element_for_url(url)
value = Resource.value_for_element(elem)
return cls.page_for_value(resp, value)
@classmethod
def count_for_url(cls, url):
"""Return the count of server side resources given a url"""
headers = Resource.headers_for_url(url)
return int(headers['X-Records'])
@classmethod
def page_for_value(cls, resp, value):
"""Return a new `Page` representing the given resource `value`
retrieved using the HTTP response `resp`.
This method records pagination ``Link`` headers present in `resp`, so
that the returned `Page` can return their resources from its
`next_page()` and `first_page()` methods.
"""
page = cls(value)
links = parse_link_value(resp.getheader('Link'))
for url, data in six.iteritems(links):
if data.get('rel') == 'start':
page.start_url = url
if data.get('rel') == 'next':
page.next_url = url
return page
class Resource(object):
"""A Recurly API resource.
This superclass implements the general behavior for all the
specific Recurly API resources.
All method parameters and return values that are XML elements are
`xml.etree.ElementTree.Element` instances.
"""
_classes_for_nodename = dict()
sensitive_attributes = ()
"""Attributes that are not logged with the rest of a `Resource`
of this class when submitted in a ``POST`` or ``PUT`` request."""
xml_attribute_attributes = ()
"""Attributes of a `Resource` of this class that are not serialized
as subelements, but rather attributes of the top level element."""
inherits_currency = False
"""Whether a `Resource` of this class inherits a currency from a
parent `Resource`, and therefore should not use `Money` instances
even though this `Resource` class has no ``currency`` attribute of
its own."""
def serializable_attributes(self):
""" Attributes to be serialized in a ``POST`` or ``PUT`` request.
Returns all attributes unless a blacklist is specified
"""
if hasattr(self, 'blacklist_attributes'):
return [attr for attr in self.attributes if attr not in
self.blacklist_attributes]
else:
return self.attributes
def __init__(self, **kwargs):
try:
self.attributes.index('currency') # Test for currency attribute,
self.currency # and test if it's set.
except ValueError:
pass
except AttributeError:
self.currency = recurly.DEFAULT_CURRENCY
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classmethod
def http_request(cls, url, method='GET', body=None, headers=None):
"""Make an HTTP request with the given method to the given URL,
returning the resulting `http_client.HTTPResponse` instance.
If the `body` argument is a `Resource` instance, it is serialized
to XML by calling its `to_element()` method before submitting it.
Requests are authenticated per the Recurly API specification
using the ``recurly.API_KEY`` value for the API key.
Requests and responses are logged at the ``DEBUG`` level to the
``recurly.http.request`` and ``recurly.http.response`` loggers
respectively.
"""
if recurly.API_KEY is None:
raise recurly.UnauthorizedError('recurly.API_KEY not set')
is_non_ascii = lambda s: any(ord(c) >= 128 for c in s)
if is_non_ascii(recurly.API_KEY) or is_non_ascii(recurly.SUBDOMAIN):
raise recurly.ConfigurationError("""Setting API_KEY or SUBDOMAIN to
unicode strings may cause problems. Please use strings.
Issue described here:
https://gist.github.com/maximehardy/d3a0a6427d2b6791b3dc""")
urlparts = urlsplit(url)
connection_options = {}
if recurly.SOCKET_TIMEOUT_SECONDS:
connection_options['timeout'] = recurly.SOCKET_TIMEOUT_SECONDS
if urlparts.scheme != 'https':
connection = http_client.HTTPConnection(urlparts.netloc, **connection_options)
elif recurly.CA_CERTS_FILE is None:
connection = http_client.HTTPSConnection(urlparts.netloc, **connection_options)
else:
connection_options['context'] = ssl.create_default_context(cafile=recurly.CA_CERTS_FILE)
connection = http_client.HTTPSConnection(urlparts.netloc, **connection_options)
headers = {} if headers is None else dict(headers)
headers.setdefault('Accept', 'application/xml')
headers.update({
'User-Agent': recurly.USER_AGENT
})
headers['X-Api-Version'] = recurly.api_version()
headers['Authorization'] = 'Basic %s' % base64.b64encode(six.b('%s:' % recurly.API_KEY)).decode()
log = logging.getLogger('recurly.http.request')
if log.isEnabledFor(logging.DEBUG):
log.debug("%s %s HTTP/1.1", method, url)
for header, value in six.iteritems(headers):
if header == 'Authorization':
value = '<redacted>'
log.debug("%s: %s", header, value)
log.debug('')
if method in ('POST', 'PUT') and body is not None:
if isinstance(body, Resource):
log.debug(body.as_log_output())
else:
log.debug(body)
if isinstance(body, Resource):
body = ElementTree.tostring(body.to_element(), encoding='UTF-8')
headers['Content-Type'] = 'application/xml; charset=utf-8'
if method in ('POST', 'PUT') and body is None:
headers['Content-Length'] = '0'
connection.request(method, url, body, headers)
resp = connection.getresponse()
resp_headers = cls.headers_as_dict(resp)
log = logging.getLogger('recurly.http.response')
if log.isEnabledFor(logging.DEBUG):
log.debug("HTTP/1.1 %d %s", resp.status, resp.reason)
log.debug(resp_headers)
log.debug('')
recurly.cache_rate_limit_headers(resp_headers)
return resp
@classmethod
def headers_as_dict(cls, resp):
"""Turns an array of response headers into a dictionary"""
if six.PY2:
pairs = [header.split(': ') for header in resp.msg.headers]
return dict([(k, v.strip()) for k, v in pairs])
else:
return dict([(k, v.strip()) for k, v in resp.msg._headers])
def as_log_output(self):
"""Returns an XML string containing a serialization of this
instance suitable for logging.
Attributes named in the instance's `sensitive_attributes` are
redacted.
"""
elem = self.to_element()
for attrname in self.sensitive_attributes:
for sensitive_el in elem.iter(attrname):
sensitive_el.text = 'XXXXXXXXXXXXXXXX'
return ElementTree.tostring(elem, encoding='UTF-8')
@classmethod
def _learn_nodenames(cls, classes):
for resource_class in classes:
try:
rc_is_subclass = issubclass(resource_class, cls)
except TypeError:
continue
if not rc_is_subclass:
continue
nodename = getattr(resource_class, 'nodename', None)
if nodename is None:
continue
cls._classes_for_nodename[nodename] = resource_class
@classmethod
def get(cls, uuid):
"""Return a `Resource` instance of this class identified by
the given code or UUID.
Only `Resource` classes with specified `member_path` attributes
can be directly requested with this method.
"""
url = urljoin(recurly.base_uri(), cls.member_path % (uuid,))
resp, elem = cls.element_for_url(url)
return cls.from_element(elem)
@classmethod
def headers_for_url(cls, url):
"""Return the headers only for the given URL as a dict"""
response = cls.http_request(url, method='HEAD')
if response.status != 200:
cls.raise_http_error(response)
return Resource.headers_as_dict(response)
@classmethod
def element_for_url(cls, url):
"""Return the resource at the given URL, as a
(`http_client.HTTPResponse`, `xml.etree.ElementTree.Element`) tuple
resulting from a ``GET`` request to that URL."""
response = cls.http_request(url)
if response.status != 200:
cls.raise_http_error(response)
assert response.getheader('Content-Type').startswith('application/xml')
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
response_doc = ElementTree.fromstring(response_xml)
return response, response_doc
@classmethod
def _subclass_for_nodename(cls, nodename):
try:
return cls._classes_for_nodename[nodename]
except KeyError:
raise ValueError("Could not determine resource class for array member with tag %r"
% nodename)
@classmethod
def value_for_element(cls, elem):
"""Deserialize the given XML `Element` into its representative
value.
Depending on the content of the element, the returned value may be:
* a string, integer, or boolean value
* a `datetime.datetime` instance
* a list of `Resource` instances
* a single `Resource` instance
* a `Money` instance
* ``None``
"""
log = logging.getLogger('recurly.resource')
if elem is None:
log.debug("Converting %r element into None value", elem)
return
if elem.attrib.get('nil') is not None:
log.debug("Converting %r element with nil attribute into None value", elem.tag)
return
if elem.tag.endswith('_in_cents') and 'currency' not in cls.attributes and not cls.inherits_currency:
log.debug("Converting %r element in class with no matching 'currency' into a Money value", elem.tag)
return Money.from_element(elem)
attr_type = elem.attrib.get('type')
log.debug("Converting %r element with type %r", elem.tag, attr_type)
if attr_type == 'integer':
return int(elem.text.strip())
if attr_type == 'float':
return float(elem.text.strip())
if attr_type == 'boolean':
return elem.text.strip() == 'true'
if attr_type == 'datetime':
return iso8601.parse_date(elem.text.strip())
if attr_type == 'array':
return [cls._subclass_for_nodename(sub_elem.tag).from_element(sub_elem) for sub_elem in elem]
# Unknown types may be the names of resource classes.
if attr_type is not None:
try:
value_class = cls._subclass_for_nodename(attr_type)
except ValueError:
log.debug("Not converting %r element with type %r to a resource as that matches no known nodename",
elem.tag, attr_type)
else:
return value_class.from_element(elem)
# Untyped complex elements should still be resource instances. Guess from the nodename.
if len(elem): # has children
value_class = cls._subclass_for_nodename(elem.tag)
log.debug("Converting %r tag into a %s", elem.tag, value_class.__name__)
return value_class.from_element(elem)
value = elem.text or ''
return value.strip()
@classmethod
def element_for_value(cls, attrname, value):
"""Serialize the given value into an XML `Element` with the
given tag name, returning it.
The value argument may be:
* a `Resource` instance
* a `Money` instance
* a `datetime.datetime` instance
* a string, integer, or boolean value
* ``None``
* a list or tuple of these values
"""
if isinstance(value, Resource):
if attrname in cls._classes_for_nodename:
# override the child's node name with this attribute name
return value.to_element(attrname)
return value.to_element()
el = ElementTree.Element(attrname)
if value is None:
el.attrib['nil'] = 'nil'
elif isinstance(value, bool):
el.attrib['type'] = 'boolean'
el.text = 'true' if value else 'false'
elif isinstance(value, int):
el.attrib['type'] = 'integer'
el.text = str(value)
elif isinstance(value, datetime):
el.attrib['type'] = 'datetime'
el.text = value.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(value, list) or isinstance(value, tuple):
for sub_resource in value:
if hasattr(sub_resource, 'to_element'):
el.append(sub_resource.to_element())
else:
el.append(cls.element_for_value(re.sub(r"s$", "", attrname), sub_resource))
elif isinstance(value, Money):
value.add_to_element(el)
else:
el.text = six.text_type(value)
return el
@classmethod
def paginated(cls, url):
""" Exposes Page.page_for_url in Resource """
return Page.page_for_url(url)
@classmethod
def from_element(cls, elem):
"""Return a new instance of this `Resource` class representing
the given XML element."""
return cls().update_from_element(elem)
def update_from_element(self, elem):
"""Reset this `Resource` instance to represent the values in
the given XML element."""
self._elem = elem
for attrname in self.attributes:
try:
delattr(self, attrname)
except AttributeError:
pass
document_url = elem.attrib.get('href')
if document_url is not None:
self._url = document_url
return self
def _make_actionator(self, url, method, extra_handler=None):
def actionator(*args, **kwargs):
if kwargs:
full_url = '%s?%s' % (url, urlencode(kwargs))
else:
full_url = url
body = args[0] if args else None
response = self.http_request(full_url, method, body)
if response.status == 200:
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
return self.update_from_element(ElementTree.fromstring(response_xml))
elif response.status == 201:
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
return self.value_for_element(elem)
elif response.status == 204:
pass
elif extra_handler is not None:
return extra_handler(response)
else:
self.raise_http_error(response)
return actionator
# Usually the path is the same as the element name.
def __getpath__(self, name):
return name
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(name)
try:
selfnode = self._elem
except AttributeError:
raise AttributeError(name)
if name in self.xml_attribute_attributes:
try:
return selfnode.attrib[name]
except KeyError:
raise AttributeError(name)
elem = selfnode.find(self.__getpath__(name))
if elem is None:
# It might be an <a name> link.
for anchor_elem in selfnode.findall('a'):
if anchor_elem.attrib.get('name') == name:
url = anchor_elem.attrib['href']
method = anchor_elem.attrib['method'].upper()
return self._make_actionator(url, method)
raise AttributeError(name)
# Follow links.
if 'href' in elem.attrib:
def make_relatitator(url):
def relatitator(**kwargs):
if kwargs:
full_url = '%s?%s' % (url, urlencode(kwargs))
else:
full_url = url
resp, elem = Resource.element_for_url(full_url)
value = Resource.value_for_element(elem)
if isinstance(value, list):
return Page.page_for_value(resp, value)
return value
return relatitator
url = elem.attrib['href']
if url == '':
return Resource.value_for_element(elem)
else:
return make_relatitator(url)
return self.value_for_element(elem)
@classmethod
def all(cls, **kwargs):
"""Return a `Page` of instances of this `Resource` class from
its general collection endpoint.
Only `Resource` classes with specified `collection_path`
endpoints can be requested with this method. Any provided
keyword arguments are passed to the API endpoint as query
parameters.
"""
url = urljoin(recurly.base_uri(), cls.collection_path)
if kwargs:
url = '%s?%s' % (url, urlencode(kwargs))
return Page.page_for_url(url)
@classmethod
def count(cls, **kwargs):
"""Return a count of server side resources given
filtering arguments in kwargs.
"""
url = urljoin(recurly.base_uri(), cls.collection_path)
if kwargs:
url = '%s?%s' % (url, urlencode(kwargs))
return Page.count_for_url(url)
def save(self):
"""Save this `Resource` instance to the service.
If this is a new instance, it is created through a ``POST``
request to its collection endpoint. If this instance already
exists in the service, it is updated through a ``PUT`` request
to its own URL.
"""
if hasattr(self, '_url'):
return self._update()
return self._create()
def _update(self):
return self.put(self._url)
def _create(self):
url = urljoin(recurly.base_uri(), self.collection_path)
return self.post(url)
def put(self, url):
"""Sends this `Resource` instance to the service with a
``PUT`` request to the given URL."""
response = self.http_request(url, 'PUT', self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def post(self, url, body=None):
"""Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body"""
response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status not in (200, 201, 204):
self.raise_http_error(response)
self._url = response.getheader('Location')
if response.status in (200, 201):
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def delete(self):
"""Submits a deletion request for this `Resource` instance as
a ``DELETE`` request to its URL."""
response = self.http_request(self._url, 'DELETE')
if response.status != 204:
self.raise_http_error(response)
@classmethod
def raise_http_error(cls, response):
"""Raise a `ResponseError` of the appropriate subclass in
reaction to the given `http_client.HTTPResponse`."""
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
exc_class = recurly.errors.error_class_for_http_status(response.status)
raise exc_class(response_xml)
def to_element(self, root_name=None):
"""Serialize this `Resource` instance to an XML element."""
if not root_name:
root_name = self.nodename
elem = ElementTree.Element(root_name)
for attrname in self.serializable_attributes():
# Only use values that have been loaded into the internal
# __dict__. For retrieved objects we look into the XML response at
# access time, so the internal __dict__ contains only the elements
# that have been set on the client side.
try:
value = self.__dict__[attrname]
except KeyError:
continue
if attrname in self.xml_attribute_attributes:
elem.attrib[attrname] = six.text_type(value)
else:
sub_elem = self.element_for_value(attrname, value)
elem.append(sub_elem)
return elem
| ./CrossVul/dataset_final_sorted/CWE-918/py/bad_2503_0 |
crossvul-python_data_good_5581_2 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/__init__.py: Self-test for random number generation modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for random number generators"""
__revision__ = "$Id$"
def get_tests(config={}):
tests = []
from Crypto.SelfTest.Random import Fortuna; tests += Fortuna.get_tests(config=config)
from Crypto.SelfTest.Random import OSRNG; tests += OSRNG.get_tests(config=config)
from Crypto.SelfTest.Random import test_random; tests += test_random.get_tests(config=config)
from Crypto.SelfTest.Random import test_rpoolcompat; tests += test_rpoolcompat.get_tests(config=config)
from Crypto.SelfTest.Random import test__UserFriendlyRNG; tests += test__UserFriendlyRNG.get_tests(config=config)
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_5581_2 |
crossvul-python_data_good_5581_0 | # -*- coding: ascii -*-
#
# FortunaAccumulator.py : Fortuna's internal accumulator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
from binascii import b2a_hex
import time
import warnings
from Crypto.pct_warnings import ClockRewindWarning
import SHAd256
import FortunaGenerator
class FortunaPool(object):
"""Fortuna pool type
This object acts like a hash object, with the following differences:
- It keeps a count (the .length attribute) of the number of bytes that
have been added to the pool
- It supports a .reset() method for in-place reinitialization
- The method to add bytes to the pool is .append(), not .update().
"""
digest_size = SHAd256.digest_size
def __init__(self):
self.reset()
def append(self, data):
self._h.update(data)
self.length += len(data)
def digest(self):
return self._h.digest()
def hexdigest(self):
if sys.version_info[0] == 2:
return b2a_hex(self.digest())
else:
return b2a_hex(self.digest()).decode()
def reset(self):
self._h = SHAd256.new()
self.length = 0
def which_pools(r):
"""Return a list of pools indexes (in range(32)) that are to be included during reseed number r.
According to _Practical Cryptography_, chapter 10.5.2 "Pools":
"Pool P_i is included if 2**i is a divisor of r. Thus P_0 is used
every reseed, P_1 every other reseed, P_2 every fourth reseed, etc."
"""
# This is a separate function so that it can be unit-tested.
assert r >= 1
retval = []
mask = 0
for i in range(32):
# "Pool P_i is included if 2**i is a divisor of [reseed_count]"
if (r & mask) == 0:
retval.append(i)
else:
break # optimization. once this fails, it always fails
mask = (mask << 1) | 1L
return retval
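# Worked example of the divisor rule (these follow directly from the
# mask logic above):
#     which_pools(1) -> [0]
#     which_pools(2) -> [0, 1]
#     which_pools(3) -> [0]
#     which_pools(4) -> [0, 1, 2]
#     which_pools(8) -> [0, 1, 2, 3]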
class FortunaAccumulator(object):
min_pool_size = 64 # TODO: explain why
reseed_interval = 0.100 # 100 ms TODO: explain why
def __init__(self):
self.reseed_count = 0
self.generator = FortunaGenerator.AESGenerator()
self.last_reseed = None
# Initialize 32 FortunaPool instances.
# NB: This is _not_ equivalent to [FortunaPool()]*32, which would give
# us 32 references to the _same_ FortunaPool instance (and cause the
# assertion below to fail).
self.pools = [FortunaPool() for i in range(32)] # 32 pools
assert(self.pools[0] is not self.pools[1])
def _forget_last_reseed(self):
# This is not part of the standard Fortuna definition, and using this
# function frequently can weaken Fortuna's ability to resist a state
# compromise extension attack, but we need this in order to properly
# implement Crypto.Random.atfork(). Otherwise, forked child processes
# might continue to use their parent's PRNG state for up to 100ms in
# some cases. (e.g. CVE-2013-1445)
self.last_reseed = None
def random_data(self, bytes):
current_time = time.time()
if (self.last_reseed is not None and self.last_reseed > current_time): # Avoid float comparison to None to make Py3k happy
warnings.warn("Clock rewind detected. Resetting last_reseed.", ClockRewindWarning)
self.last_reseed = None
if (self.pools[0].length >= self.min_pool_size and
(self.last_reseed is None or
current_time > self.last_reseed + self.reseed_interval)):
self._reseed(current_time)
# The following should fail if we haven't seeded the pool yet.
return self.generator.pseudo_random_data(bytes)
def _reseed(self, current_time=None):
if current_time is None:
current_time = time.time()
seed = []
self.reseed_count += 1
self.last_reseed = current_time
for i in which_pools(self.reseed_count):
seed.append(self.pools[i].digest())
self.pools[i].reset()
seed = b("").join(seed)
self.generator.reseed(seed)
def add_random_event(self, source_number, pool_number, data):
assert 1 <= len(data) <= 32
assert 0 <= source_number <= 255
assert 0 <= pool_number <= 31
self.pools[pool_number].append(bchr(source_number))
self.pools[pool_number].append(bchr(len(data)))
self.pools[pool_number].append(data)
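# Each event is framed as (source byte, length byte, data) before being
# hashed into the pool, so event boundaries remain unambiguous no
# matter what the data contains.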
# vim:set ts=4 sw=4 sts=4 expandtab:
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_5581_0 |
crossvul-python_data_good_5581_1 | # -*- coding: utf-8 -*-
#
# Random/_UserFriendlyRNG.py : A user-friendly random number generator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
import os
import threading
import struct
import time
from math import floor
from Crypto.Random import OSRNG
from Crypto.Random.Fortuna import FortunaAccumulator
class _EntropySource(object):
def __init__(self, accumulator, src_num):
self._fortuna = accumulator
self._src_num = src_num
self._pool_num = 0
def feed(self, data):
self._fortuna.add_random_event(self._src_num, self._pool_num, data)
self._pool_num = (self._pool_num + 1) & 31
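# The '& 31' wraps the pool index, distributing successive events from
# this source round-robin across all 32 Fortuna pools.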
class _EntropyCollector(object):
def __init__(self, accumulator):
self._osrng = OSRNG.new()
self._osrng_es = _EntropySource(accumulator, 255)
self._time_es = _EntropySource(accumulator, 254)
self._clock_es = _EntropySource(accumulator, 253)
def reinit(self):
# Add 256 bits to each of the 32 pools, twice. (For a total of 16384
# bits collected from the operating system.)
for i in range(2):
block = self._osrng.read(32*32)
for p in range(32):
self._osrng_es.feed(block[p*32:(p+1)*32])
block = None
self._osrng.flush()
def collect(self):
# Collect 64 bits of entropy from the operating system and feed it to Fortuna.
self._osrng_es.feed(self._osrng.read(8))
# Add the fractional part of time.time()
t = time.time()
self._time_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
# Add the fractional part of time.clock()
t = time.clock()
self._clock_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
class _UserFriendlyRNG(object):
def __init__(self):
self.closed = False
self._fa = FortunaAccumulator.FortunaAccumulator()
self._ec = _EntropyCollector(self._fa)
self.reinit()
def reinit(self):
"""Initialize the random number generator and seed it with entropy from
the operating system.
"""
# Save the pid (helps ensure that Crypto.Random.atfork() gets called)
self._pid = os.getpid()
# Collect entropy from the operating system and feed it to
# FortunaAccumulator
self._ec.reinit()
# Override FortunaAccumulator's 100ms minimum re-seed interval. This
# is necessary to avoid a race condition between this function and
# self.read(), which can otherwise cause forked child processes to
# produce identical output. (e.g. CVE-2013-1445)
#
# Note that if this function can be called frequently by an attacker,
# (and if the bits from OSRNG are insufficiently random) it will weaken
# Fortuna's ability to resist a state compromise extension attack.
self._fa._forget_last_reseed()
def close(self):
self.closed = True
self._osrng = None
self._fa = None
def flush(self):
pass
def read(self, N):
"""Return N bytes from the RNG."""
if self.closed:
raise ValueError("I/O operation on closed file")
if not isinstance(N, (long, int)):
raise TypeError("an integer is required")
if N < 0:
raise ValueError("cannot read to end of infinite stream")
# Collect some entropy and feed it to Fortuna
self._ec.collect()
# Ask Fortuna to generate some bytes
retval = self._fa.random_data(N)
# Check that we haven't forked in the meantime. (If we have, we don't
# want to use the data, because it might have been duplicated in the
# parent process.)
self._check_pid()
# Return the random data.
return retval
def _check_pid(self):
# Lame fork detection to remind developers to invoke Random.atfork()
# after every call to os.fork(). Note that this check is not reliable,
# since process IDs can be reused on most operating systems.
#
# You need to do Random.atfork() in the child process after every call
# to os.fork() to avoid reusing PRNG state. If you want to avoid
# leaking PRNG state to child processes (for example, if you are using
# os.setuid()) then you should also invoke Random.atfork() in the
# *parent* process.
if os.getpid() != self._pid:
raise AssertionError("PID check failed. RNG must be re-initialized after fork(). Hint: Try Random.atfork()")
class _LockingUserFriendlyRNG(_UserFriendlyRNG):
def __init__(self):
self._lock = threading.Lock()
_UserFriendlyRNG.__init__(self)
def close(self):
self._lock.acquire()
try:
return _UserFriendlyRNG.close(self)
finally:
self._lock.release()
def reinit(self):
self._lock.acquire()
try:
return _UserFriendlyRNG.reinit(self)
finally:
self._lock.release()
def read(self, bytes):
self._lock.acquire()
try:
return _UserFriendlyRNG.read(self, bytes)
finally:
self._lock.release()
class RNGFile(object):
def __init__(self, singleton):
self.closed = False
self._singleton = singleton
# PEP 343: Support for the "with" statement
def __enter__(self):
"""PEP 343 support"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""PEP 343 support"""
self.close()
def close(self):
# Don't actually close the singleton, just close this RNGFile instance.
self.closed = True
self._singleton = None
def read(self, bytes):
if self.closed:
raise ValueError("I/O operation on closed file")
return self._singleton.read(bytes)
def flush(self):
if self.closed:
raise ValueError("I/O operation on closed file")
_singleton_lock = threading.Lock()
_singleton = None
def _get_singleton():
global _singleton
_singleton_lock.acquire()
try:
if _singleton is None:
_singleton = _LockingUserFriendlyRNG()
return _singleton
finally:
_singleton_lock.release()
def new():
return RNGFile(_get_singleton())
def reinit():
_get_singleton().reinit()
def get_random_bytes(n):
"""Return the specified number of cryptographically-strong random bytes."""
return _get_singleton().read(n)
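# Typical use (sketch):
#     from Crypto import Random
#     key = Random.get_random_bytes(16)   # 16 bytes of key material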
# vim:set ts=4 sw=4 sts=4 expandtab:
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_5581_1 |
crossvul-python_data_good_546_1 | import datetime
import re
import time
import traceback
import urllib2
from email import encoders
from email.mime.base import MIMEBase
import mailpile.security as security
from mailpile.conn_brokers import Master as ConnBroker
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.commands import Command
from mailpile.crypto.gpgi import GnuPG
from mailpile.crypto.gpgi import OpenPGPMimeSigningWrapper
from mailpile.crypto.gpgi import OpenPGPMimeEncryptingWrapper
from mailpile.crypto.gpgi import OpenPGPMimeSignEncryptWrapper
from mailpile.crypto.mime import UnwrapMimeCrypto, MessageAsString
from mailpile.crypto.mime import OBSCURE_HEADERS_MILD, OBSCURE_HEADERS_EXTREME
from mailpile.crypto.mime import ObscureSubject
from mailpile.crypto.state import EncryptionInfo, SignatureInfo
from mailpile.eventlog import GetThreadEvent
from mailpile.mailutils.addresses import AddressHeaderParser
from mailpile.mailutils.emails import Email, MakeContentID, ClearParseCache
from mailpile.plugins import PluginManager, EmailTransform
from mailpile.plugins.vcard_gnupg import PGPKeysImportAsVCards
from mailpile.plugins.search import Search
_plugins = PluginManager(builtin=__file__)
##[ GnuPG e-mail processing ]#################################################
class ContentTxf(EmailTransform):
def _wrap_key_in_html(self, title, keydata):
return ((
"<html><head><meta charset='utf-8'></head><body>\n"
"<h1>%(title)s</h1><p>\n\n%(description)s\n\n</p>"
"<pre>\n%(key)s\n</pre><hr>"
"<i><a href='%(ad_url)s'>%(ad)s</a>.</i></body></html>"
) % self._wrap_key_in_html_vars(title, keydata)).encode('utf-8')
def _wrap_key_in_html_vars(self, title, keydata):
return {
"title": title,
"description": _(
"This is a digital encryption key, which you can use to send\n"
"confidential messages to the owner, or to verify their\n"
"digital signatures. You can safely discard or ignore this\n"
"file if you do not use e-mail encryption or signatures."),
"ad": _("Generated by Mailpile and GnuPG"),
"ad_url": "https://www.mailpile.is/", # FIXME: Link to help?
"key": keydata}
def TransformOutgoing(self, sender, rcpts, msg, **kwargs):
matched = False
gnupg = None
sender_keyid = None
# Prefer to just get everything from the profile VCard, in the
# common case...
profile = self._get_sender_profile(sender, kwargs)
if profile['vcard'] is not None:
sender_keyid = profile['vcard'].pgp_key
crypto_format = profile.get('crypto_format') or 'none'
# Parse the openpgp_header data from the crypto_format
openpgp_header = [p.split(':')[-1]
for p in crypto_format.split('+')
if p.startswith('openpgp_header:')]
if not openpgp_header:
openpgp_header = self.config.prefs.openpgp_header and ['CFG']
if openpgp_header and openpgp_header[0] != 'N' and not sender_keyid:
# This is a fallback: this shouldn't happen much in normal use
try:
gnupg = gnupg or GnuPG(self.config, event=GetThreadEvent())
seckeys = dict([(uid["email"], fp) for fp, key
in gnupg.list_secret_keys().iteritems()
if key["capabilities_map"].get("encrypt")
and key["capabilities_map"].get("sign")
for uid in key["uids"]])
sender_keyid = seckeys.get(sender)
except (KeyError, TypeError, IndexError, ValueError):
traceback.print_exc()
if sender_keyid and openpgp_header:
preference = {
'ES': 'signencrypt',
'SE': 'signencrypt',
'E': 'encrypt',
'S': 'sign',
'N': 'unprotected',
'CFG': self.config.prefs.openpgp_header
}[openpgp_header[0].upper()]
msg["OpenPGP"] = ("id=%s; preference=%s"
% (sender_keyid, preference))
if ('attach-pgp-pubkey' in msg and
msg['attach-pgp-pubkey'][:3].lower() in ('yes', 'tru')):
gnupg = gnupg or GnuPG(self.config, event=GetThreadEvent())
if sender_keyid:
keys = gnupg.list_keys(selectors=[sender_keyid])
else:
keys = gnupg.address_to_keys(AddressHeaderParser(sender).addresses_list()[0])
key_count = 0
for fp, key in keys.iteritems():
if not any(key["capabilities_map"].values()):
continue
# We should never really hit this more than once. But if we
# do, it should still be fine.
keyid = key["keyid"]
data = gnupg.get_pubkey(keyid)
try:
from_name = key["uids"][0]["name"]
filename = _('Encryption key for %s') % from_name
except (KeyError, IndexError, TypeError):
filename = _('My encryption key')
if self.config.prefs.gpg_html_wrap:
data = self._wrap_key_in_html(filename, data)
ext = 'html'
else:
ext = 'asc'
att = MIMEBase('application', 'pgp-keys')
att.set_payload(data)
encoders.encode_base64(att)
del att['MIME-Version']
att.add_header('Content-Id', MakeContentID())
att.add_header('Content-Disposition', 'attachment',
filename=filename + '.' + ext)
att.signature_info = SignatureInfo(parent=msg.signature_info)
att.encryption_info = EncryptionInfo(parent=msg.encryption_info)
msg.attach(att)
key_count += 1
if key_count > 0:
msg['x-mp-internal-pubkeys-attached'] = "Yes"
return sender, rcpts, msg, matched, True
class CryptoTxf(EmailTransform):
def TransformOutgoing(self, sender, rcpts, msg,
crypto_policy='none',
crypto_format='default',
cleaner=lambda m: m,
**kwargs):
matched = False
if 'pgp' in crypto_policy or 'gpg' in crypto_policy:
wrapper = None
# Set defaults
prefer_inline = kwargs.get('prefer_inline', False)
if 'obscure_all_meta' in crypto_format:
obscured = OBSCURE_HEADERS_EXTREME
elif 'obscure_meta' in crypto_format:
obscured = OBSCURE_HEADERS_MILD
elif self.config.prefs.encrypt_subject:
obscured = {'subject': ObscureSubject}
else:
obscured = {}
if 'sign' in crypto_policy and 'encrypt' in crypto_policy:
wrapper = OpenPGPMimeSignEncryptWrapper
prefer_inline = 'prefer_inline' in crypto_format
elif 'encrypt' in crypto_policy:
wrapper = OpenPGPMimeEncryptingWrapper
prefer_inline = 'prefer_inline' in crypto_format
elif 'sign' in crypto_policy:
# When signing only, we 1) prefer inline by default, based
# on this: https://github.com/mailpile/Mailpile/issues/1693
# and 2) don't obscure any headers as that's pointless.
wrapper = OpenPGPMimeSigningWrapper
prefer_inline = 'pgpmime' not in crypto_format
obscured = {}
if wrapper:
msg = wrapper(self.config,
sender=sender,
cleaner=cleaner,
recipients=rcpts,
use_html_wrapper=self.config.prefs.gpg_html_wrap,
obscured_headers=obscured
).wrap(msg, prefer_inline=prefer_inline)
matched = True
return sender, rcpts, msg, matched, (not matched)
_plugins.register_outgoing_email_content_transform('500_gnupg', ContentTxf)
_plugins.register_outgoing_email_crypto_transform('500_gnupg', CryptoTxf)
##[ Misc. GPG-related API commands ]##########################################
class GPGKeySearch(Command):
"""Search for a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/searchkey', 'crypto/gpg/searchkey', '<terms>')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'q': 'search terms'}
class CommandResult(Command.CommandResult):
def as_text(self):
if self.result:
return '\n'.join(["%s: %s <%s>" % (keyid, x["name"], x["email"]) for keyid, det in self.result.iteritems() for x in det["uids"]])
else:
return _("No results")
def command(self):
args = list(self.args)
for q in self.data.get('q', []):
args.extend(q.split())
return self._gnupg().search_key(" ".join(args))
class GPGKeyReceive(Command):
"""Fetch a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/receivekey', 'crypto/gpg/receivekey', '<keyid>')
HTTP_CALLABLE = ('POST', )
HTTP_QUERY_VARS = {'keyid': 'ID of key to fetch'}
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
def command(self):
keyid = self.data.get("keyid", self.args)
res = []
for key in keyid:
res.append(self._gnupg().recv_key(key))
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
return res
class GPGKeyImport(Command):
"""Import a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/importkey', 'crypto/gpg/importkey',
'<key_file>')
HTTP_CALLABLE = ('POST', )
HTTP_QUERY_VARS = {
'key_data': 'ASCII armor of public key to be imported',
'key_file': 'Location of file containing the public key',
'key_url': 'URL of file containing the public key',
'name': '(ignored)'
}
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
def command(self):
key_files = self.data.get("key_file", []) + [a for a in self.args
if not '://' in a]
key_urls = self.data.get("key_url", []) + [a for a in self.args
if '://' in a]
key_data = []
key_data.extend(self.data.get("key_data", []))
for key_file in key_files:
with open(key_file) as keyfile:
key_data.append(keyfile.read())
for key_url in key_urls:
with ConnBroker.context(need=[ConnBroker.OUTGOING_HTTP]):
uo = urllib2.urlopen(key_url)
key_data.append(uo.read())
rv = self._gnupg().import_keys('\n'.join(key_data))
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
# Update the VCards!
PGPKeysImportAsVCards(self.session,
arg=([i['fingerprint'] for i in rv['updated']] +
[i['fingerprint'] for i in rv['imported']])
).run()
return self._success(_("Imported %d keys") % len(key_data), rv)
class GPGKeySign(Command):
"""Sign a key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/signkey', 'crypto/gpg/signkey', '<keyid> [<signingkey>]')
HTTP_CALLABLE = ('POST',)
HTTP_QUERY_VARS = {'keyid': 'The key to sign',
'signingkey': 'The key to sign with'}
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
def command(self):
signingkey = None
keyid = None
args = list(self.args)
try: keyid = args.pop(0)
except IndexError: keyid = self.data.get("keyid", None)
try: signingkey = args.pop(0)
except IndexError: signingkey = self.data.get("signingkey", None)
if not keyid:
return self._error("You must supply a keyid", None)
rv = self._gnupg().sign_key(keyid, signingkey)
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
return rv
class GPGKeyImportFromMail(Search):
"""Import a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/importkeyfrommail',
'crypto/gpg/importkeyfrommail', '<mid>')
HTTP_CALLABLE = ('POST', )
HTTP_QUERY_VARS = {'mid': 'Message ID', 'att': 'Attachment ID'}
COMMAND_CACHE_TTL = 0
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
def as_text(self):
if self.result:
return "Imported %d keys (%d updated, %d unchanged) from the mail" % (
self.result["results"]["count"],
self.result["results"]["imported"],
self.result["results"]["unchanged"])
return ""
def command(self):
session, config, idx = self.session, self.session.config, self._idx()
args = list(self.args)
if args and args[-1][0] == "#":
attid = args.pop()
else:
attid = self.data.get("att", 'application/pgp-keys')
args.extend(["=%s" % x for x in self.data.get("mid", [])])
eids = self._choose_messages(args)
if len(eids) < 1:
return self._error("No messages selected", None)
elif len(eids) > 1:
return self._error("One message at a time, please", None)
email = Email(idx, list(eids)[0])
fn, attr = email.extract_attachment(session, attid, mode='inline')
if attr and attr["data"]:
res = self._gnupg().import_keys(attr["data"])
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
return self._success("Imported key", res)
return self._error("No results found", None)
class GPGKeyList(Command):
"""List GPG Keys."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/keylist',
'crypto/gpg/keylist', '<address>')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'address': 'E-mail address'}
def command(self):
args = list(self.args)
if len(args) > 0:
addr = args[0]
else:
addr = self.data.get("address", None)
if addr is None:
return self._error("Must supply e-mail address", None)
res = self._gnupg().address_to_keys(addr)
return self._success("Searched for keys for e-mail address", res)
class GPGKeyListSecret(Command):
"""List Secret GPG Keys"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/keylist/secret',
'crypto/gpg/keylist/secret', '[<check>]')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'check': 'True to omit disabled, expired, revoked keys'}
def command(self):
args = list(self.args)
if len(args) > 0:
check = args[0]
else:
check = self.data.get('check', '')
check = 'True' in check
all_keys = self._gnupg().list_secret_keys()
if check:
res = {fprint : all_keys[fprint] for fprint in all_keys
if not (all_keys[fprint]['revoked'] or all_keys[fprint]['disabled'])}
else:
res = all_keys
return self._success("Searched for secret keys", res)
class GPGUsageStatistics(Search):
"""Get usage statistics from mail, given an address"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/statistics',
'crypto/gpg/statistics', '<address>')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'address': 'E-mail address'}
COMMAND_CACHE_TTL = 0
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
def as_text(self):
if self.result:
return "%d%% of e-mail from %s has PGP signatures (%d/%d)" % (
100*self.result["ratio"],
self.result["address"],
self.result["pgpsigned"],
self.result["messages"])
return ""
def command(self):
args = list(self.args)
if len(args) > 0:
addr = args[0]
else:
addr = self.data.get("address", None)
if addr is None:
return self._error("Must supply an address", None)
session, idx = self._do_search(search=["from:%s" % addr])
total = 0
for messageid in session.results:
total += 1
session, idx = self._do_search(search=["from:%s" % addr, "has:pgp"])
pgp = 0
for messageid in session.results:
pgp += 1
if total > 0:
ratio = float(pgp)/total
else:
ratio = 0
res = {"messages": total,
"pgpsigned": pgp,
"ratio": ratio,
"address": addr}
return self._success("Got statistics for address", res)
class GPGCheckKeys(Search):
"""Sanity check your keys and profiles"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/check_keys', 'crypto/gpg/check_keys',
'[--all-keys]')
HTTP_CALLABLE = ('GET', )
COMMAND_CACHE_TTL = 0
MIN_KEYSIZE = 2048
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
def as_text(self):
if not isinstance(self.result, (dict,)):
return ''
if self.result.get('details'):
message = '%s.\n - %s' % (self.message, '\n - '.join(
p['description'] for p in self.result['details']
))
else:
message = '%s. %s' % (self.message, _('Looks good!'))
if self.result.get('fixes'):
message += '\n\n%s\n - %s' % (_('Proposed fixes:'),
'\n - '.join(
'\n * '.join(f) for f in self.result['fixes']
))
return message
def _fix_gen_key(self, min_bits=2048):
return [
_("You need a new key!"),
_("Run: %s") % '`gpg --gen-key`',
_("Answer the tool\'s questions: use RSA and RSA, %d bits or more"
) % min_bits]
def _fix_mp_config(self, good_key=None):
fprint = (good_key['fingerprint'] if good_key else '<FINGERPRINT>')
return [
_('Update the Mailpile config to use a good key:'),
_('IMPORTANT: This MUST be done before disabling the key!'),
_('Run: %s') % ('`set prefs.gpg_recipient = %s`' % fprint),
_('Run: %s') % ('`optimize`'),
_('This key\'s passphrase will be used to log in to Mailpile')]
def _fix_revoke_key(self, fprint, comment=''):
return [
_('Revoke bad keys:') + (' ' + comment if comment else ''),
_('Run: %s') % ('`gpg --gen-revoke %s`' % fprint),
_('Say yes to the first question, then follow the instructions'),
_('A revocation certificate will be shown on screen'),
_('Copy & paste that, save, and send to people who have the old key'),
_('You can search for %s to find such people'
) % '`is:encrypted to:me`']
def _fix_disable_key(self, fprint, comment=''):
return [
_('Disable bad keys:') + (' ' + comment if comment else ''),
_('Run: %s') % ('`gpg --edit-key %s`' % fprint),
_('Type %s') % '`disable`',
_('Type %s') % '`save`']
def command(self):
session, config = self.session, self.session.config
args = list(self.args)
all_keys = '--all-keys' in args
quiet = '--quiet' in args
date = datetime.date.today()
today = date.strftime("%Y-%m-%d")
date += datetime.timedelta(days=14)
fortnight = date.strftime("%Y-%m-%d")
serious = 0
details = []
fixes = []
bad_keys = {}
good_key = None
good_keys = {}
secret_keys = self._gnupg().list_secret_keys()
for fprint, info in secret_keys.iteritems():
k_info = {
'description': None,
'key': fprint,
'keysize': int(info.get('keysize', 0)),
}
is_serious = True
exp = info.get('expiration_date')
if info["disabled"]:
k_info['description'] = _('%s: --- Disabled.') % fprint
is_serious = False
elif (not info['capabilities_map'].get('encrypt') or
not info['capabilities_map'].get('sign')):
if info.get("revoked"):
k_info['description'] = _('%s: --- Revoked.'
) % fprint
is_serious = False
elif exp and exp <= today:
k_info['description'] = _('%s: Bad: Expired on %s'
) % (fprint,
info['expiration_date'])
else:
k_info['description'] = _('%s: Bad: Key is useless'
) % fprint
elif exp and exp <= fortnight:
k_info['description'] = _('%s: Bad: Expires on %s'
) % (fprint, info['expiration_date'])
elif k_info['keysize'] < self.MIN_KEYSIZE:
k_info['description'] = _('%s: Bad: Too small (%d bits)'
) % (fprint, k_info['keysize'])
else:
good_keys[fprint] = info
if (not good_key
or int(good_key['keysize']) < k_info['keysize']):
good_key = info
k_info['description'] = _('%s: OK: %d bits, looks good!'
) % (fprint, k_info['keysize'])
is_serious = False
if k_info['description'] is not None:
details.append(k_info)
if is_serious:
fixes += [self._fix_revoke_key(fprint, _('(optional)')),
self._fix_disable_key(fprint)]
serious += 1
if fprint not in good_keys:
bad_keys[fprint] = info
bad_recipient = False
if config.prefs.gpg_recipient:
for k in bad_keys:
if k.endswith(config.prefs.gpg_recipient):
details.append({
'gpg_recipient': True,
'description': _('%s: Mailpile config uses bad key'
) % k,
'key': k
})
bad_recipient = True
serious += 1
if bad_recipient and good_key:
fixes[:0] = [self._fix_mp_config(good_key)]
profiles = config.vcards.find_vcards([], kinds=['profile'])
for vc in profiles:
p_info = {
'profile': vc.get('x-mailpile-rid').value,
'email': vc.email,
'fn': vc.fn
}
try:
if all_keys:
vcls = [k.value for k in vc.get_all('key') if k.value]
else:
vcls = [vc.get('key').value]
except (IndexError, AttributeError):
vcls = []
for key in vcls:
fprint = key.split(',')[-1]
if fprint and fprint in bad_keys:
p_info['key'] = fprint
p_info['description'] = _('%(key)s: Bad key in profile'
' %(fn)s <%(email)s>'
' (%(profile)s)') % p_info
details.append(p_info)
serious += 1
if not vcls:
p_info['description'] = _('No key for %(fn)s <%(email)s>'
' (%(profile)s)') % p_info
details.append(p_info)
serious += 1
if len(good_keys) == 0:
fixes[:0] = [self._fix_gen_key(min_bits=self.MIN_KEYSIZE),
self._fix_mp_config()]
if quiet and not serious:
return self._success('OK')
ret = self._error if serious else self._success
return ret(_('Sanity checked: %d keys in GPG keyring, %d profiles')
% (len(secret_keys), len(profiles)),
result={'passed': not serious,
'details': details,
'fixes': fixes})
_plugins.register_commands(GPGKeySearch)
_plugins.register_commands(GPGKeyReceive)
_plugins.register_commands(GPGKeyImport)
_plugins.register_commands(GPGKeyImportFromMail)
_plugins.register_commands(GPGKeySign)
_plugins.register_commands(GPGKeyList)
_plugins.register_commands(GPGUsageStatistics)
_plugins.register_commands(GPGKeyListSecret)
_plugins.register_commands(GPGCheckKeys)
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_546_1 |
crossvul-python_data_good_3658_0 | #
# ElGamal.py : ElGamal encryption/decryption and signatures
#
# Part of the Python Cryptography Toolkit
#
# Originally written by: A.M. Kuchling
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""ElGamal public-key algorithm (randomized encryption and signature).
Signature algorithm
-------------------
The security of the ElGamal signature scheme is based (like DSA) on the discrete
logarithm problem (DLP_). Given a cyclic group, a generator *g*,
and an element *h*, it is hard to find an integer *x* such that *g^x = h*.
The group is the largest multiplicative sub-group of the integers modulo *p*,
with *p* prime.
The signer holds a value *x* (*0<x<p-1*) as private key, and its public
key (*g*, *p*, *y* where *y=g^x mod p*) is distributed.
The ElGamal signature is twice as big as *p*.
Encryption algorithm
--------------------
The security of the ElGamal encryption scheme is based on the computational
Diffie-Hellman problem (CDH_). Given a cyclic group, a generator *g*,
and two integers *a* and *b*, it is difficult to find
the element *g^{ab}* when only *g^a* and *g^b* are known, and not *a* and *b*.
As before, the group is the largest multiplicative sub-group of the integers
modulo *p*, with *p* prime.
The receiver holds a value *a* (*0<a<p-1*) as private key, and its public key
(*g*, *p*, *b* where *b=g^a mod p*) is given to the sender.
The ElGamal ciphertext is twice as big as *p*.
Security
--------
Both the DLP and the CDH problem are believed to be difficult, and they have
withstood all attempts at efficient solutions for more than 30 years.
The cryptographic strength is linked to the magnitude of *p*.
In 2012, a sufficient size for *p* is deemed to be 2048 bits.
For more information, see the most recent ECRYPT_ report.
Even though ElGamal algorithms are in theory reasonably secure for new designs,
in practice there are few good reasons to use them.
The signature is four times larger than the equivalent DSA, and the ciphertext
is two times larger than the equivalent RSA.
Functionality
-------------
This module provides facilities for generating new ElGamal keys and for constructing
them from known components. ElGamal keys allow you to perform basic signing,
verification, encryption, and decryption.
>>> from Crypto import Random
>>> from Crypto.Random import random
>>> from Crypto.PublicKey import ElGamal
>>> from Crypto.Util.number import GCD
>>> from Crypto.Hash import SHA
>>>
>>> message = "Hello"
>>> key = ElGamal.generate(1024, Random.new().read)
>>> h = SHA.new(message).digest()
>>> while 1:
>>> k = random.StrongRandom().randint(1,key.p-1)
>>> if GCD(k,key.p-1)==1: break
>>> sig = key.sign(h,k)
>>> ...
>>> if key.verify(h,sig):
>>> print "OK"
>>> else:
>>> print "Incorrect signature"
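A minimal encryption round-trip, as a sketch (raw ElGamal on a bare
integer is shown for illustration only; real applications should apply
a padding scheme first):
>>> plaintext = 12345L
>>> k = random.StrongRandom().randint(1,key.p-2)
>>> ciphertext = key.encrypt(plaintext, k)
>>> key.decrypt(ciphertext) == plaintext
True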
.. _DLP: http://www.cosic.esat.kuleuven.be/publications/talk-78.pdf
.. _CDH: http://en.wikipedia.org/wiki/Computational_Diffie%E2%80%93Hellman_assumption
.. _ECRYPT: http://www.ecrypt.eu.org/documents/D.SPA.17.pdf
"""
__revision__ = "$Id$"
__all__ = ['generate', 'construct', 'error', 'ElGamalobj']
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
class error (Exception):
pass
# Generate an ElGamal key with N bits
def generate(bits, randfunc, progress_func=None):
"""Randomly generate a fresh, new ElGamal key.
The key will be safe for use for both encryption and signature
(although it should be used for **only one** purpose).
:Parameters:
bits : int
Key length, or size (in bits) of the modulus *p*.
Recommended value is 2048.
randfunc : callable
Random number generation function; it should accept
a single integer N and return a string of random data
N bytes long.
progress_func : callable
Optional function that will be called with a short string
containing the key parameter currently being generated;
it's useful for interactive applications where a user is
waiting for a key to be generated.
:attention: You should always use a cryptographically secure random number generator,
such as the one defined in the ``Crypto.Random`` module; **don't** just use the
current time and the ``random`` module.
:Return: An ElGamal key object (`ElGamalobj`).
"""
obj=ElGamalobj()
# Generate a safe prime p
# See Algorithm 4.86 in Handbook of Applied Cryptography
if progress_func:
progress_func('p\n')
while 1:
q = bignum(getPrime(bits-1, randfunc))
obj.p = 2*q+1
if number.isPrime(obj.p, randfunc=randfunc):
break
# Generate generator g
# See Algorithm 4.80 in Handbook of Applied Cryptography
# Note that the order of the group is n=p-1=2q, where q is prime
if progress_func:
progress_func('g\n')
while 1:
# We must avoid g=2 because of Bleichenbacher's attack described
# in "Generating ElGamal signatures without knowing the secret key",
# 1996
#
obj.g = number.getRandomRange(3, obj.p, randfunc)
safe = 1
if pow(obj.g, 2, obj.p)==1:
safe=0
if safe and pow(obj.g, q, obj.p)==1:
safe=0
# Discard g if it divides p-1 because of the attack described
# in Note 11.67 (iii) in HAC
if safe and divmod(obj.p-1, obj.g)[1]==0:
safe=0
# g^{-1} must not divide p-1 because of Khadir's attack
# described in "Conditions of the generator for forging ElGamal
# signature", 2011
ginv = number.inverse(obj.g, obj.p)
if safe and divmod(obj.p-1, ginv)[1]==0:
safe=0
if safe:
break
# Generate private key x
if progress_func:
progress_func('x\n')
obj.x=number.getRandomRange(2, obj.p-1, randfunc)
# Generate public key y
if progress_func:
progress_func('y\n')
obj.y = pow(obj.g, obj.x, obj.p)
return obj
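# Example usage (a sketch; the small modulus here only keeps the
# example fast, real keys should be 2048 bits or more):
# >>> from Crypto import Random
# >>> key = generate(256, Random.new().read)
# >>> key.has_private()
# 1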
def construct(tup):
"""Construct an ElGamal key from a tuple of valid ElGamal components.
The modulus *p* must be a prime.
The following conditions must apply:
- 1 < g < p-1
- g^{p-1} = 1 mod p
- 1 < x < p-1
- g^x = y mod p
:Parameters:
tup : tuple
A tuple of long integers, with 3 or 4 items
in the following order:
1. Modulus (*p*).
2. Generator (*g*).
3. Public key (*y*).
4. Private key (*x*). Optional.
:Return: An ElGamal key object (`ElGamalobj`).
"""
obj=ElGamalobj()
if len(tup) not in [3,4]:
raise ValueError('argument for construct() wrong length')
for i in range(len(tup)):
field = obj.keydata[i]
setattr(obj, field, tup[i])
return obj
class ElGamalobj(pubkey):
"""Class defining an ElGamal key.
:undocumented: __getstate__, __setstate__, __repr__, __getattr__
"""
#: Dictionary of ElGamal parameters.
#:
#: A public key will only have the following entries:
#:
#: - **y**, the public key.
#: - **g**, the generator.
#: - **p**, the modulus.
#:
#: A private key will also have:
#:
#: - **x**, the private key.
keydata=['p', 'g', 'y', 'x']
def encrypt(self, plaintext, K):
"""Encrypt a piece of data with ElGamal.
:Parameter plaintext: The piece of data to encrypt with ElGamal.
It must be numerically smaller than the modulus (*p*).
:Type plaintext: byte string or long
:Parameter K: A secret number, chosen randomly in the closed
range *[1,p-2]*.
:Type K: long (recommended) or byte string (not recommended)
:Return: A tuple with two items. Each item is of the same type as the
plaintext (string or long).
:attention: selection of *K* is crucial for security. Generating a
random number larger than *p-1* and taking the modulus by *p-1* is
**not** secure, since smaller values will occur more frequently.
Generating a random number systematically smaller than *p-1*
(e.g. *floor((p-1)/8)* random bytes) is also **not** secure.
In general, it shall not be possible for an attacker to know
the value of any bit of K.
:attention: The number *K* shall not be reused for any other
operation and shall be discarded immediately.
"""
return pubkey.encrypt(self, plaintext, K)
def decrypt(self, ciphertext):
"""Decrypt a piece of data with ElGamal.
:Parameter ciphertext: The piece of data to decrypt with ElGamal.
:Type ciphertext: byte string, long or a 2-item tuple as returned
by `encrypt`
:Return: A byte string if ciphertext was a byte string or a tuple
of byte strings. A long otherwise.
"""
return pubkey.decrypt(self, ciphertext)
def sign(self, M, K):
"""Sign a piece of data with ElGamal.
:Parameter M: The piece of data to sign with ElGamal. Its bit
size may not exceed that of *p-1*.
:Type M: byte string or long
:Parameter K: A secret number, chosen randomly in the closed
range *[1,p-2]* and such that *gcd(k,p-1)=1*.
:Type K: long (recommended) or byte string (not recommended)
:attention: selection of *K* is crucial for security. Generating a
random number larger than *p-1* and taking the modulus by *p-1* is
**not** secure, since smaller values will occur more frequently.
Generating a random number systematically smaller than *p-1*
(e.g. *floor((p-1)/8)* random bytes) is also **not** secure.
In general, it shall not be possible for an attacker to know
the value of any bit of K.
:attention: The number *K* shall not be reused for any other
operation and shall be discarded immediately.
:attention: It is strongly recommended to have M be a digest created
via a cryptographic hash, otherwise an attacker may mount an
existential forgery attack.
:Return: A tuple with 2 longs.
"""
return pubkey.sign(self, M, K)
def verify(self, M, signature):
"""Verify the validity of an ElGamal signature.
:Parameter M: The expected message.
:Type M: byte string or long
:Parameter signature: The ElGamal signature to verify.
:Type signature: A tuple with 2 longs as returned by `sign`
:Return: True if the signature is correct, False otherwise.
"""
return pubkey.verify(self, M, signature)
def _encrypt(self, M, K):
a=pow(self.g, K, self.p)
b=( M*pow(self.y, K, self.p) ) % self.p
return ( a,b )
def _decrypt(self, M):
if (not hasattr(self, 'x')):
raise TypeError('Private key not available in this object')
ax=pow(M[0], self.x, self.p)
plaintext=(M[1] * inverse(ax, self.p ) ) % self.p
return plaintext
def _sign(self, M, K):
if (not hasattr(self, 'x')):
raise TypeError('Private key not available in this object')
p1=self.p-1
if (GCD(K, p1)!=1):
raise ValueError('Bad K value: GCD(K,p-1)!=1')
a=pow(self.g, K, self.p)
t=(M-self.x*a) % p1
while t<0: t=t+p1
b=(t*inverse(K, p1)) % p1
return (a, b)
def _verify(self, M, sig):
if sig[0]<1 or sig[0]>self.p-1:
return 0
v1=pow(self.y, sig[0], self.p)
v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
v2=pow(self.g, M, self.p)
if v1==v2:
return 1
return 0
def size(self):
return number.size(self.p) - 1
def has_private(self):
if hasattr(self, 'x'):
return 1
else:
return 0
def publickey(self):
return construct((self.p, self.g, self.y))
object=ElGamalobj
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_3658_0 |
crossvul-python_data_good_546_0 | #coding:utf-8
import os
import string
import sys
import time
import re
import StringIO
import tempfile
import threading
import traceback
import select
import pgpdump
import base64
import quopri
from datetime import datetime
from email.parser import Parser
from email.message import Message
from threading import Thread
import mailpile.platforms
from mailpile.i18n import gettext
from mailpile.i18n import ngettext as _n
from mailpile.crypto.state import *
from mailpile.crypto.mime import MimeSigningWrapper, MimeEncryptingWrapper
from mailpile.safe_popen import Popen, PIPE, Safe_Pipe
_ = lambda s: s
DEFAULT_KEYSERVERS = ["hkps://hkps.pool.sks-keyservers.net",
"hkp://subset.pool.sks-keyservers.net"]
DEFAULT_KEYSERVER_OPTIONS = [
'ca-cert-file=%s' % __file__.replace('.pyc', '.py')]
GPG_KEYID_LENGTH = 8
GNUPG_HOMEDIR = None # None=use what gpg uses
GPG_BINARY = mailpile.platforms.GetDefaultGnuPGCommand()
GPG_VERSIONS = {}
BLOCKSIZE = 65536
openpgp_algorithms = {1: _("RSA"),
2: _("RSA (encrypt only)"),
3: _("RSA (sign only)"),
16: _("ElGamal (encrypt only)"),
17: _("DSA"),
20: _("ElGamal (encrypt/sign) [COMPROMISED]"),
22: _("EdDSA"),
999: _("Unknown")}
# For details on type 20 compromisation, see
# http://lists.gnupg.org/pipermail/gnupg-announce/2003q4/000160.html
ENTROPY_LOCK = threading.Lock()
class GnuPGEventUpdater:
"""
Parse the GPG response into something useful for the Event Log.
"""
def __init__(self, event):
from mailpile.eventlog import Event
self.event = event or Event()
def _log(self, section, message):
data = section.get('gnupg', [])
if data:
data[-1].append(message)
def _log_private(self, message):
self._log(self.event.private_data, message)
def _log_public(self, message):
self._log(self.event.private_data, message)
self._log(self.event.data, message)
def running_gpg(self, why):
for section in (self.event.data, self.event.private_data):
data = section.get('gnupg', [])
data.append([why, int(time.time())])
section['gnupg'] = data
def update_args(self, args):
self._log_public(' '.join(args))
def update_sent_passphrase(self):
self._log_public('Sent passphrase')
def _parse_gpg_line(self, line):
if line.startswith('[GNUPG:] '):
pass # FIXME: Parse for machine-readable data
elif line.startswith('gpg: '):
self._log_private(line[5:].strip())
def update_stdout(self, line):
self._parse_gpg_line(line)
def update_stderr(self, line):
self._parse_gpg_line(line)
def update_return_code(self, code):
self._log_public('GnuPG returned %s' % code)
class GnuPGResultParser:
"""
Parse the GPG response into EncryptionInfo and SignatureInfo.
"""
def __init__(rp, decrypt_requires_MDC=True, debug=None):
rp.decrypt_requires_MDC = decrypt_requires_MDC
rp.debug = debug or (lambda t: True)
rp.signature_info = SignatureInfo()
rp.signature_info["protocol"] = "openpgp"
rp.encryption_info = EncryptionInfo()
rp.encryption_info["protocol"] = "openpgp"
rp.plaintext = ""
def parse(rp, retvals):
signature_info = rp.signature_info
encryption_info = rp.encryption_info
from mailpile.mailutils.emails import ExtractEmailAndName
# Belt & suspenders: work around some buggy GnuPG status codes
gpg_stderr = ''.join(retvals[1]["stderr"])
# First pass, set some initial state.
locked, missing = [], []
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == 'NEED_PASSPHRASE':
locked += [data[2]]
encryption_info.part_status = "lockedkey"
encryption_info["locked_keys"] = list(set(locked))
elif keyword == 'GOOD_PASSPHRASE':
encryption_info["locked_keys"] = []
elif keyword == "DECRYPTION_FAILED":
missing += [x[1].strip() for x in retvals[1]["status"]
if x[0] == "NO_SECKEY"]
if missing:
encryption_info["missing_keys"] = list(set(missing))
if encryption_info.part_status != "lockedkey":
if missing:
encryption_info.part_status = "missingkey"
else:
encryption_info.part_status = "error"
elif keyword == "DECRYPTION_OKAY":
if (rp.decrypt_requires_MDC and
'message was not integrity protected' in gpg_stderr):
rp.debug('Message not integrity protected, failing.')
encryption_info.part_status = "error"
else:
encryption_info.part_status = "decrypted"
rp.plaintext = "".join(retvals[1]["stdout"])
elif keyword == "ENC_TO":
keylist = encryption_info.get("have_keys", [])
if data[1] not in keylist:
keylist.append(data[1].strip())
encryption_info["have_keys"] = list(set(keylist))
elif keyword == "PLAINTEXT":
encryption_info.filename = data[3].strip()
elif signature_info.part_status == "none":
# Only one of these will ever be emitted per key, use
# this to set initial state. We may end up revising
# the status depending on more info later.
if keyword in ("GOODSIG", "BADSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "GOODSIG")
and "unverified"
or "invalid")
rp.plaintext = "".join(retvals[1]["stdout"])
elif keyword == "ERRSIG":
signature_info.part_status = "error"
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[5])
# Second pass, this may update/mutate the state set above
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == "NO_SECKEY":
keyid = data[1].strip()
if "missing_keys" not in encryption_info:
encryption_info["missing_keys"] = [keyid]
elif keyid not in encryption_info["missing_keys"]:
encryption_info["missing_keys"].append(keyid)
while keyid in encryption_info["have_keys"]:
encryption_info["have_keys"].remove(keyid)
elif keyword == "VALIDSIG":
# FIXME: Determine trust level, between new, unverified,
# verified, untrusted.
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[3])
elif keyword in ("EXPKEYSIG", "REVKEYSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "EXPKEYSIG")
and "expired"
or "revoked")
# FIXME: This appears to be spammy. Is my key borked, or
# is GnuPG being stupid?
#
# elif keyword == "KEYEXPIRED": # Ignoring: SIGEXPIRED
# signature_info.part_status = "expired"
elif keyword == "KEYREVOKED":
signature_info.part_status = "revoked"
elif keyword == "NO_PUBKEY":
signature_info.part_status = "unknown"
elif keyword in ("TRUST_ULTIMATE", "TRUST_FULLY"):
if signature_info.part_status == "unverified":
signature_info.part_status = "verified"
if encryption_info.part_status == "error":
rp.plaintext = ""
return rp
class GnuPGRecordParser:
def __init__(self):
self.keys = {}
self.curkeyid = None
self.curdata = None
self.record_fields = ["record", "validity", "keysize", "keytype",
"keyid", "creation_date", "expiration_date",
"uidhash", "ownertrust", "uid", "sigclass",
"capabilities", "flag", "sn", "hashtype",
"curve"]
self.record_types = ["pub", "sub", "ssb", "fpr", "uat", "sec", "tru",
"sig", "rev", "uid", "gpg", "rvk", "grp"]
self.record_parsers = [self.parse_pubkey, self.parse_subkey,
self.parse_subkey, self.parse_fingerprint,
self.parse_userattribute, self.parse_privkey,
self.parse_trust, self.parse_signature,
self.parse_revoke, self.parse_uidline,
self.parse_none, self.parse_revocation_key,
self.parse_keygrip]
self.dispatch = dict(zip(self.record_types, self.record_parsers))
def parse(self, lines):
for line in lines:
self.parse_line(line)
return self.keys
def parse_line(self, line):
line = dict(zip(self.record_fields,
map(lambda s: s.replace("\\x3a", ":"),
stubborn_decode(line).strip().split(":"))))
r = self.dispatch.get(line["record"], self.parse_unknown)
r(line)
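# Illustrative input (an assumption about typical "gpg --with-colons"
# output, not drawn from this module): a record such as
#   pub:u:4096:1:61F1ECB625693617:1412960411:1672531200::u:::scESC:
# maps to record="pub", validity="u", keysize="4096", keytype="1",
# keyid="61F1ECB625693617", and so on, following self.record_fields.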
def _parse_dates(self, line):
for ts in ('expiration_date', 'creation_date'):
if line.get(ts) and '-' not in line[ts]:
try:
unixtime = int(line[ts])
if unixtime > 946684800: # 2000-01-01
dt = datetime.fromtimestamp(unixtime)
line[ts] = dt.strftime('%Y-%m-%d')
except ValueError:
line[ts+'_unparsed'] = line[ts]
line[ts] = '1970-01-01'
def _parse_keydata(self, line):
line["keytype_name"] = _(openpgp_algorithms.get(int(line["keytype"]),
'Unknown'))
line["capabilities_map"] = {
"encrypt": "E" in line["capabilities"],
"sign": "S" in line["capabilities"],
"certify": "C" in line["capabilities"],
"authenticate": "A" in line["capabilities"],
}
line["disabled"] = "D" in line["capabilities"]
line["revoked"] = "r" in line["validity"]
self._parse_dates(line)
return line
def _clean_curdata(self):
for v in self.curdata.keys():
if self.curdata[v] == "":
del self.curdata[v]
del self.curdata["record"]
def parse_pubkey(self, line):
self.curkeyid = line["keyid"]
self.curdata = self.keys[self.curkeyid] = self._parse_keydata(line)
self.curdata["subkeys"] = []
self.curdata["uids"] = []
self.curdata["secret"] = (self.curdata["record"] == "sec")
self.parse_uidline(self.curdata)
self._clean_curdata()
def parse_subkey(self, line):
self.curdata = self._parse_keydata(line)
self.keys[self.curkeyid]["subkeys"].append(self.curdata)
self._clean_curdata()
def parse_fingerprint(self, line):
fpr = line["uid"]
self.curdata["fingerprint"] = fpr
if len(self.curkeyid) < len(fpr):
self.keys[fpr] = self.keys[self.curkeyid]
del(self.keys[self.curkeyid])
self.curkeyid = fpr
def parse_userattribute(self, line):
# TODO: We are currently ignoring user attributes as not useful.
# We may at some point want to use --attribute-fd and read
# in user photos and such?
pass
def parse_privkey(self, line):
self.parse_pubkey(line)
def parse_uidline(self, line):
email, name, comment = parse_uid(line["uid"])
self._parse_dates(line)
if email or name or comment:
self.keys[self.curkeyid]["uids"].append({
"email": email,
"name": name,
"comment": comment,
"creation_date": line["creation_date"]
})
else:
pass # This is the case where a uid or sec line have no
# information aside from the creation date, which we
# parse elsewhere. As these lines are effectively blank,
# we omit them to simplify presentation to the user.
def parse_trust(self, line):
# FIXME: We are currently ignoring commentary from the Trust DB.
pass
def parse_signature(self, line):
# FIXME: This is probably wrong; signatures are on UIDs and not
# the key itself. No? Yes? Figure this out.
if "signatures" not in self.keys[self.curkeyid]:
self.keys[self.curkeyid]["signatures"] = []
sig = {
"signer": line["uid"],
"signature_date": line["creation_date"],
"keyid": line["keyid"],
"trust": line["sigclass"],
"keytype": line["keytype"]
}
self.keys[self.curkeyid]["signatures"].append(sig)
def parse_keygrip(self, line):
self.curdata["keygrip"] = line["uid"]
def parse_revoke(self, line):
pass # FIXME
def parse_revocation_key(self, line):
pass # FIXME
def parse_unknown(self, line):
print "Unknown line with code '%s'" % (line,)
def parse_none(self, line):
pass
UID_PARSE_RE = "^([^\(\<]+?){0,1}( \((.+?)\)){0,1}( \<(.+?)\>){0,1}\s*$"
def stubborn_decode(text):
if isinstance(text, unicode):
return text
try:
return text.decode("utf-8")
except UnicodeDecodeError:
try:
return text.decode("iso-8859-1")
except UnicodeDecodeError:
return text.decode("utf-8", "replace")
def parse_uid(uidstr):
matches = re.match(UID_PARSE_RE, uidstr)
if matches:
email = matches.groups(0)[4] or ""
comment = matches.groups(0)[2] or ""
name = matches.groups(0)[0] or ""
else:
if '@' in uidstr and ' ' not in uidstr:
email, name = uidstr, ""
else:
email, name = "", uidstr
comment = ""
return email, name, comment
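# An illustrative sketch of the above:
#   parse_uid('Alice Example (work) <alice@example.com>')
# returns ('alice@example.com', 'Alice Example', 'work').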
class StreamReader(Thread):
def __init__(self, name, fd, callback, lines=True):
Thread.__init__(self, target=self.readin, args=(fd, callback))
self.name = name
self.state = 'startup'
self.lines = lines
self.start()
def __str__(self):
return '%s(%s/%s, lines=%s)' % (Thread.__str__(self),
self.name, self.state, self.lines)
def readin(self, fd, callback):
try:
if self.lines:
self.state = 'read'
for line in iter(fd.readline, b''):
self.state = 'callback'
callback(line)
self.state = 'read'
else:
while True:
self.state = 'read'
buf = fd.read(BLOCKSIZE)
self.state = 'callback'
callback(buf)
if buf == "":
break
except:
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
class StreamWriter(Thread):
def __init__(self, name, fd, output, partial_write_ok=False):
Thread.__init__(self, target=self.writeout, args=(fd, output))
self.name = name
self.state = 'startup'
self.partial_write_ok = partial_write_ok
self.start()
def __str__(self):
return '%s(%s/%s)' % (Thread.__str__(self), self.name, self.state)
def writeout(self, fd, output):
if isinstance(output, (str, unicode)):
total = len(output)
output = StringIO.StringIO(output)
else:
total = 0
try:
while True:
self.state = 'read'
line = output.read(BLOCKSIZE)
if line == "":
break
self.state = 'write'
fd.write(line)
total -= len(line)
output.close()
except:
if not self.partial_write_ok:
print '%s: %s bytes left' % (self, total)
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
DEBUG_GNUPG = False
class GnuPG:
"""
Wrap GnuPG and make all functionality feel Pythonic.
"""
ARMOR_BEGIN_SIGNED = '-----BEGIN PGP SIGNED MESSAGE-----'
ARMOR_BEGIN_SIGNATURE = '-----BEGIN PGP SIGNATURE-----'
ARMOR_END_SIGNED = '-----END PGP SIGNATURE-----'
ARMOR_END_SIGNATURE = '-----END PGP SIGNATURE-----'
ARMOR_BEGIN_ENCRYPTED = '-----BEGIN PGP MESSAGE-----'
ARMOR_END_ENCRYPTED = '-----END PGP MESSAGE-----'
ARMOR_BEGIN_PUB_KEY = '-----BEGIN PGP PUBLIC KEY BLOCK-----'
ARMOR_END_PUB_KEY = '-----END PGP PUBLIC KEY BLOCK-----'
LAST_KEY_USED = 'DEFAULT' # This is a 1-value global cache
def __init__(self, config,
session=None, use_agent=None, debug=False, dry_run=False,
event=None, passphrase=None):
global DEBUG_GNUPG
self.available = None
self.outputfds = ["stdout", "stderr", "status"]
self.errors = []
self.event = GnuPGEventUpdater(event)
self.session = session
self.config = config or (session and session.config) or None
if self.config:
DEBUG_GNUPG = ('gnupg' in self.config.sys.debug)
self.homedir = self.config.sys.gpg_home or GNUPG_HOMEDIR
self.gpgbinary = self.config.sys.gpg_binary or GPG_BINARY
self.passphrases = self.config.passphrases
self.passphrase = (passphrase if (passphrase is not None) else
self.passphrases['DEFAULT']).get_reader()
self.use_agent = (use_agent if (use_agent is not None)
else self.config.prefs.gpg_use_agent)
else:
self.homedir = GNUPG_HOMEDIR
self.gpgbinary = GPG_BINARY
self.passphrases = None
if passphrase:
self.passphrase = passphrase.get_reader()
else:
self.passphrase = None
self.use_agent = use_agent
self.dry_run = dry_run
self.debug = (self._debug_all if (debug or DEBUG_GNUPG)
else self._debug_none)
def prepare_passphrase(self, keyid, signing=False, decrypting=False):
"""Query the Mailpile secrets for a usable passphrase."""
def _use(kid, sps_reader):
self.passphrase = sps_reader
GnuPG.LAST_KEY_USED = kid
return True
if self.config:
message = []
if decrypting:
message.append(_("Your PGP key is needed for decrypting."))
if signing:
message.append(_("Your PGP key is needed for signing."))
match, sps = self.config.get_passphrase(keyid,
prompt=_('Unlock your encryption key'),
description=' '.join(message))
if match:
return _use(match, sps.get_reader())
self.passphrase = None # This *may* allow use of the GnuPG agent
return False
def _debug_all(self, msg):
if self.session:
self.session.debug(msg.rstrip())
else:
print '%s' % str(msg).rstrip()
def _debug_none(self, msg):
pass
def set_home(self, path):
self.homedir = path
def version(self):
"""Returns a string representing the GnuPG version number."""
self.event.running_gpg(_('Checking GnuPG version'))
retvals = self.run(["--version"], novercheck=True)
return retvals[1]["stdout"][0].split('\n')[0]
def version_tuple(self, update=False):
"""Returns a tuple representing the GnuPG version number."""
global GPG_VERSIONS
if update or not GPG_VERSIONS.get(self.gpgbinary):
match = re.search(r"(\d+)\.(\d+)\.(\d+)", self.version())
version = tuple(int(v) for v in match.groups())
GPG_VERSIONS[self.gpgbinary] = version
return GPG_VERSIONS[self.gpgbinary]
def gnupghome(self):
"""Returns the location of the GnuPG keyring"""
self.event.running_gpg(_('Checking GnuPG home directory'))
rv = self.run(["--version"], novercheck=True)[1]["stdout"][0]
for l in rv.splitlines():
if l.startswith('Home: '):
return os.path.expanduser(l[6:].strip())
return os.path.expanduser(os.getenv('GNUPGHOME', '~/.gnupg'))
def is_available(self):
try:
self.event.running_gpg(_('Checking GnuPG availability'))
self.version_tuple(update=True)
self.available = True
except OSError:
self.available = False
return self.available
def common_args(self, args=None, version=None, will_send_passphrase=False):
if args is None:
args = []
if version is None:
version = self.version_tuple()
args.insert(0, self.gpgbinary)
args.insert(1, "--utf8-strings")
args.insert(1, "--with-colons")
args.insert(1, "--verbose")
args.insert(1, "--batch")
args.insert(1, "--enable-progress-filter")
# Disable SHA1 in all things GnuPG
args[1:1] = ["--personal-digest-preferences=SHA512",
"--digest-algo=SHA512",
"--cert-digest-algo=SHA512"]
if (not self.use_agent) or will_send_passphrase:
if version < (1, 5):
args.insert(1, "--no-use-agent")
elif version > (2, 1, 11):
args.insert(1, "--pinentry-mode=loopback")
else:
raise ImportError('Mailpile requires GnuPG 1.4.x or 2.1.12+ !')
if self.homedir:
args.insert(1, "--homedir=%s" % self.homedir)
args.insert(1, "--status-fd=2")
if will_send_passphrase:
args.insert(2, "--passphrase-fd=0")
if self.dry_run:
args.insert(1, "--dry-run")
return args
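# Illustratively (a sketch, flag order simplified): for GnuPG >= 2.1.12
# without an agent, the argv built above contains, among others:
#   gpg --status-fd=2 --pinentry-mode=loopback --batch --verbose
#   --with-colons --utf8-strings --personal-digest-preferences=SHA512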
def run(self,
args=None, gpg_input=None, outputfd=None, partial_read_ok=False,
send_passphrase=False, _raise=None, novercheck=False):
if novercheck:
version = (1, 4)
else:
version = self.version_tuple()
args = self.common_args(
args=list(args if args else []),
version=version,
will_send_passphrase=(self.passphrase and send_passphrase))
self.outputbuffers = dict([(x, []) for x in self.outputfds])
self.threads = {}
gpg_retcode = -1
proc = None
try:
if send_passphrase and (self.passphrase is None):
self.debug('Running WITHOUT PASSPHRASE %s' % ' '.join(args))
self.debug(''.join(traceback.format_stack()))
else:
self.debug('Running %s' % ' '.join(args))
# Here we go!
self.event.update_args(args)
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=0)
# GnuPG is a bit crazy, and requires that the passphrase
# be sent and the filehandle closed before anything else
# interesting happens.
if send_passphrase and self.passphrase is not None:
self.passphrase.seek(0, 0)
c = self.passphrase.read(BLOCKSIZE)
while c != '':
proc.stdin.write(c)
c = self.passphrase.read(BLOCKSIZE)
proc.stdin.write('\n')
self.event.update_sent_passphrase()
wtf = ' '.join(args)
self.threads = {
"stderr": StreamReader('gpgi-stderr(%s)' % wtf,
proc.stderr, self.parse_stderr)
}
if outputfd:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-to-fd(%s)' % wtf,
proc.stdout, outputfd.write, lines=False)
else:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-parsed(%s)' % wtf,
proc.stdout, self.parse_stdout)
if gpg_input:
# If we have output, we just stream it. Technically, this
# doesn't really need to be a thread at the moment.
self.debug('<<STDOUT<< %s' % gpg_input)
StreamWriter('gpgi-output(%s)' % wtf,
proc.stdin, gpg_input,
partial_write_ok=partial_read_ok).join()
else:
proc.stdin.close()
# Reap GnuPG
gpg_retcode = proc.wait()
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
# Update event with return code
self.event.update_return_code(gpg_retcode)
# Reap the threads
self._reap_threads()
if outputfd:
outputfd.close()
if gpg_retcode != 0 and _raise:
raise _raise('GnuPG failed, exit code: %s' % gpg_retcode)
return gpg_retcode, self.outputbuffers
def _reap_threads(self):
for tries in (1, 2, 3):
for name, thr in self.threads.iteritems():
if thr.isAlive():
thr.join(timeout=15)
if thr.isAlive() and tries > 1:
print 'WARNING: Failed to reap thread %s' % thr
def parse_status(self, line, *args):
self.debug('<<STATUS<< %s' % line)
line = line.replace("[GNUPG:] ", "")
if line == "":
return
elems = line.split(" ")
self.outputbuffers["status"].append(elems)
def parse_stdout(self, line):
self.event.update_stdout(line)
self.debug('<<STDOUT<< %s' % line)
self.outputbuffers["stdout"].append(line)
def parse_stderr(self, line):
self.event.update_stderr(line)
if line.startswith("[GNUPG:] "):
return self.parse_status(line)
self.debug('<<STDERR<< %s' % line)
self.outputbuffers["stderr"].append(line)
def parse_keylist(self, keylist):
rlp = GnuPGRecordParser()
return rlp.parse(keylist)
def list_keys(self, selectors=None):
"""
>>> g = GnuPG(None)
>>> g.list_keys()[0]
0
"""
list_keys = ["--fingerprint"]
for sel in set(selectors or []):
list_keys += ["--list-keys", sel]
if not selectors:
list_keys += ["--list-keys"]
self.event.running_gpg(_('Fetching GnuPG public key list (selectors=%s)'
) % ', '.join(selectors or []))
retvals = self.run(list_keys)
return self.parse_keylist(retvals[1]["stdout"])
def list_secret_keys(self, selectors=None):
#
# Note: The selectors that are passed by default work around a bug
# in GnuPG < 2.1, where --list-secret-keys does not list
# details about key capabilities or expiry for
# --list-secret-keys unless a selector is provided. A dot
# is reasonably likely to appear in all PGP keys, as it is
# a common component of e-mail addresses (and @ does not
# work as a selector for some reason...)
#
# The downside of this workaround is that keys with no e-mail
# address or an address like alice@localhost won't be found.
# So we disable this hack on GnuPG >= 2.1.
#
if not selectors and self.version_tuple() < (2, 1):
selectors = [".", "a", "e", "i", "p", "t", "k"]
list_keys = ["--fingerprint"]
if selectors:
for sel in selectors:
list_keys += ["--list-secret-keys", sel]
else:
list_keys += ["--list-secret-keys"]
self.event.running_gpg(_('Fetching GnuPG secret key list (selectors=%s)'
) % ', '.join(selectors or ['None']))
retvals = self.run(list_keys)
secret_keys = self.parse_keylist(retvals[1]["stdout"])
# Another unfortunate thing GPG does, is it hides the disabled
# state when listing secret keys; it seems internally only the
# public key is disabled. This makes it hard for us to reason about
# which keys can actually be used, so we compensate...
# *** FIXME JackDca 2018-09-21 - Above behaviour not seen in 2.1.18 if
# --with-colons is used (but true for human-readable output) so this
# code could be deleted.
list_keys = ["--fingerprint"]
for fprint in set(secret_keys):
list_keys += ["--list-keys", fprint]
retvals = self.run(list_keys)
public_keys = self.parse_keylist(retvals[1]["stdout"])
for fprint, info in public_keys.iteritems():
if fprint in set(secret_keys):
for k in ("disabled", "revoked"): # FIXME: Copy more?
secret_keys[fprint][k] = info[k]
return secret_keys
def import_keys(self, key_data=None):
"""
Imports gpg keys from a file object or string.
>>> key_data = open("testing/pub.key").read()
>>> g = GnuPG(None)
>>> g.import_keys(key_data)
{'failed': [], 'updated': [{'details_text': 'unchanged', 'details': 0, 'fingerprint': '08A650B8E2CBC1B02297915DC65626EED13C70DA'}], 'imported': [], 'results': {'sec_dups': 0, 'unchanged': 1, 'num_uids': 0, 'skipped_new_keys': 0, 'no_userids': 0, 'num_signatures': 0, 'num_revoked': 0, 'sec_imported': 0, 'sec_read': 0, 'not_imported': 0, 'count': 1, 'imported_rsa': 0, 'imported': 0, 'num_subkeys': 0}}
"""
self.event.running_gpg(_('Importing key to GnuPG key chain'))
retvals = self.run(["--import"], gpg_input=key_data)
return self._parse_import(retvals[1]["status"])
def _parse_import(self, output):
res = {"imported": [], "updated": [], "failed": []}
for x in output:
if x[0] == "IMPORTED":
res["imported"].append({
"fingerprint": x[1],
"username": x[2].rstrip()
})
elif x[0] == "IMPORT_OK":
reasons = {
"0": "unchanged",
"1": "new key",
"2": "new user IDs",
"4": "new signatures",
"8": "new subkeys",
"16": "contains private key",
"17": "contains new private key",
}
res["updated"].append({
"details": int(x[1]),
# FIXME: Reasons may be ORed! This does NOT handle that.
"details_text": reasons.get(x[1], str(x[1])),
"fingerprint": x[2].rstrip(),
})
elif x[0] == "IMPORT_PROBLEM":
reasons = {
"0": "no reason given",
"1": "invalid certificate",
"2": "issuer certificate missing",
"3": "certificate chain too long",
"4": "error storing certificate",
}
res["failed"].append({
"details": int(x[1]),
"details_text": reasons.get(x[1], str(x[1])),
"fingerprint": x[2].rstrip()
})
elif x[0] == "IMPORT_RES":
res["results"] = {
"count": int(x[1]),
"no_userids": int(x[2]),
"imported": int(x[3]),
"imported_rsa": int(x[4]),
"unchanged": int(x[5]),
"num_uids": int(x[6]),
"num_subkeys": int(x[7]),
"num_signatures": int(x[8]),
"num_revoked": int(x[9]),
"sec_read": int(x[10]),
"sec_imported": int(x[11]),
"sec_dups": int(x[12]),
"skipped_new_keys": int(x[13]),
"not_imported": int(x[14].rstrip()),
}
return res
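# Illustratively (a sketch): a status line tokenized as
# ['IMPORT_OK', '1', '<FINGERPRINT>'] yields an "updated" entry with
# details=1 and details_text='new key'.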
def decrypt(self, data,
outputfd=None, passphrase=None, as_lines=False, require_MDC=True):
"""
Note that this test will fail if you don't replace the recipient with
one whose key you control.
>>> g = GnuPG(None)
>>> ct = g.encrypt("Hello, World", to=["smari@mailpile.is"])[1]
>>> g.decrypt(ct)["text"]
'Hello, World'
"""
if passphrase is not None:
self.passphrase = passphrase.get_reader()
elif GnuPG.LAST_KEY_USED:
# This is an opportunistic approach to passphrase usage... we
# just hope the passphrase we used last time will work again.
# If we are right, we are done. If we are wrong, the output
# will tell us which key IDs to look for in our secret stash.
self.prepare_passphrase(GnuPG.LAST_KEY_USED, decrypting=True)
self.event.running_gpg(_('Decrypting %d bytes of data') % len(data))
for tries in (1, 2):
retvals = self.run(["--decrypt"], gpg_input=data,
outputfd=outputfd,
send_passphrase=True)
if tries == 1:
keyid = None
for msg in reversed(retvals[1]['status']):
# Reverse order so DECRYPTION_OKAY overrides KEY_CONSIDERED.
# If decryption is not ok, look for good passphrase, retry.
if msg[0] == 'DECRYPTION_OKAY':
break
elif (msg[0] == 'NEED_PASSPHRASE') and (passphrase is None):
# This message is output by gpg 1.4 but not 2.1.
if self.prepare_passphrase(msg[2], decrypting=True):
keyid = msg[2]
break
elif (msg[0] == 'KEY_CONSIDERED') and (passphrase is None):
# This message is output by gpg 2.1 but not 1.4.
if self.prepare_passphrase(msg[1], decrypting=True):
keyid = msg[1]
break
if not keyid:
break
if as_lines:
as_lines = retvals[1]["stdout"]
retvals[1]["stdout"] = []
rp = GnuPGResultParser(decrypt_requires_MDC=require_MDC,
debug=self.debug).parse(retvals)
return (rp.signature_info, rp.encryption_info,
as_lines or rp.plaintext)
def base64_segment(self, dec_start, dec_end, skip, line_len, line_end = 2):
"""
Given the start and end index of a desired segment of decoded data,
this function finds smallest segment of an encoded base64 array that
when decoded will include the desired decoded segment.
It's assumed that the base64 data has a uniform line structure of
line_len encoded characters including line_end eol characters,
and that there are skip header characters preceding the base64 data.
"""
enc_start = 4*(dec_start/3)
dec_skip = dec_start - 3*enc_start/4
enc_start += line_end*(enc_start/(line_len-line_end))
enc_end = 4*(dec_end/3)
enc_end += line_end*(enc_end/(line_len-line_end))
return enc_start, enc_end, dec_skip
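# A worked example (a sketch): with 64-character base64 lines ending in
# "\r\n" (line_len=66, line_end=2), requesting decoded bytes 100..160
# gives enc_start = 4*(100/3) = 132 and dec_skip = 100 - 3*132/4 = 1;
# enc_start is then padded by 2*(132/64) = 4 bytes of line endings.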
def pgp_packet_hdr_parse(self, header, prev_partial = False):
"""
Parse the header of a PGP packet to get the packet type, header length,
and data length. Extra trailing characters in header are ignored.
prev_partial indicates that the previous packet was a partial packet.
An illegal header returns type -1, lengths 0.
Header format is defined in RFC4880 section 4.
"""
hdr = bytearray(header.ljust( 6, chr(0)))
if not prev_partial:
hdr_len = 1
else:
hdr[1:] = hdr # Partial block headers don't have a tag
hdr[0] = 0 # Insert a dummy tag.
hdr_len = 0
is_partial = False
if prev_partial or (hdr[0] & 0xC0) == 0xC0:
# New format packet
ptag = hdr[0] & 0x3F
body_len = hdr[1]
lengthtype = 0
hdr_len += 1
if body_len < 192:
pass
elif body_len <= 223:
hdr_len += 1
body_len = ((body_len - 192) << 8) + hdr[2] + 192
elif body_len == 255:
hdr_len += 4
body_len = ( (hdr[2] << 24) + (hdr[3] << 16) +
(hdr[4] << 8) + hdr[5] )
else:
# Partial packet headers are only legal for data packets.
if not prev_partial and not ptag in {8,9,11,18}:
return (-1, 0, 0, False)
# Could do extra testing here.
is_partial = True
body_len = 1 << (hdr[1] & 0x1F)
elif (hdr[0] & 0xC0) == 0x80:
# Old format packet
ptag = (hdr[0] & 0x3C) >> 2
lengthtype = hdr[0] & 0x03
if lengthtype < 3:
hdr_len = 2
body_len = hdr[1]
if lengthtype > 0:
hdr_len = 3
body_len = (body_len << 8) + hdr[2]
if lengthtype > 1:
hdr_len = 5
body_len = (
(body_len << 16) + (hdr[3] << 8) + hdr[4] )
else:
# Kludgy extra test for compressed packets w/ "unknown" length
# gpg generates these in signed-only files. Check for valid
# compression algorithm id to minimize false positives.
if ptag != 8 or (hdr[1] < 1 or hdr[1] > 3):
return (-1, 0, 0, False)
hdr_len = 1
body_len = -1
else:
return (-1, 0, 0, False)
if hdr_len > len(header):
return (-1, 0, 0, False)
return ptag, hdr_len, body_len, is_partial
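# A worked example (a sketch): the old-format header '\x85\x01\x00' has
# (0x85 & 0x3C) >> 2 == 1 (Public-Key Encrypted Session Key) and length
# type 1, so hdr_len is 3 and body_len is (0x01 << 8) + 0x00 == 256;
# the call returns (1, 3, 256, False).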
def sniff(self, data, encoding = None):
"""
Checks arbitrary data to see if it is a PGP object and returns a set
that indicates the kind(s) of object found. The names of the set
elements are based on RFC3156 content types with 'pgp-' stripped so
they can be used in sniffers for other protocols, e.g. S/MIME.
There are additional set elements 'armored' and 'unencrypted'.
This code should give no false negatives, but may give false positives.
For efficient handling of encoded data, only small segments are decoded.
Armored files are detected by their armor header alone.
Non-armored data is detected by looking for a sequence of valid PGP
packet headers.
"""
found = set()
is_base64 = False
is_quopri = False
line_len = 0
line_end = 1
enc_start = 0
enc_end = 0
dec_start = 0
skip = 0
ptag = 0
hdr_len = 0
body_len = 0
partial = False
offset_enc = 0
offset_dec = 0
offset_packet = 0
# Identify encoding and base64 line length.
if encoding and encoding.lower() == 'base64':
line_len = data.find('\n') + 1 # Assume uniform length
if line_len < 1: # find() returned -1: no newline, one long line
    line_len = len(data)
elif line_len > 1 and data[line_len-2] == '\r':
line_end = 2
if line_len - line_end > 76: # Maximum per RFC2045 6.8
return found
enc_end = line_len
try:
segment = base64.b64decode(data[enc_start:enc_end])
except TypeError:
return found
is_base64 = True
elif encoding and encoding.lower() == 'quoted-printable':
# Can't selectively decode quopri because encoded length is data
# dependent due to escapes! Just decode one medium length segment.
# This is enough to contain the first few packets of a long file.
try:
segment = quopri.decodestring(data[0:1500])
except TypeError:
return found # *** ? Docs don't list exceptions
is_quopri = True
else:
line_len = len(data)
segment = data # *** Shallow copy?
if not segment:
found = set()
elif not (ord(segment[0]) & 0x80):
# Not a PGP packet header if MSbit is 0. Check for armoured data.
found.add('armored')
if segment.startswith(self.ARMOR_BEGIN_SIGNED):
# Clearsigned
found.add('unencrypted')
found.add('signature')
elif segment.startswith(self.ARMOR_BEGIN_SIGNATURE):
# Detached signature
found.add('signature')
elif segment.startswith(self.ARMOR_BEGIN_ENCRYPTED):
# PGP uses the same armor header for encrypted and signed only
# Fortunately gpg --decrypt handles both!
found.add('encrypted')
elif segment.startswith(self.ARMOR_BEGIN_PUB_KEY):
found.add('key')
else:
found = set()
else:
# Could be PGP packet header. Check for sequence of legal headers.
while skip < len(segment) and body_len != -1:
# Check this packet header.
prev_partial = partial
ptag, hdr_len, body_len, partial = (
self.pgp_packet_hdr_parse(segment[skip:], prev_partial) )
if prev_partial or partial:
pass
elif ptag == 11:
found.add('unencrypted') # Literal Data
elif ptag == 1:
found.add('encrypted') # Encrypted Session Key
elif ptag == 9:
found.add('encrypted') # Symmetrically Encrypted Data
elif ptag == 18:
found.add('encrypted') # Symmetrically Encrypted & MDC
elif ptag == 2:
found.add('signature') # Signature
elif ptag == 4:
found.add('signature') # One-Pass Signature
elif ptag == 6:
found.add('key') # Public Key
elif ptag == 14:
found.add('key') # Public Subkey
elif ptag == 8: # Compressed Data Packet
# This is a kludge. Signed, non-encrypted files made by gpg
# (but no other gpg files) consist of one compressed data
# packet of unknown length which contains the signature
# and data packets.
# This appears to be an interpretation of RFC4880 2.3.
# The compression prevents selective parsing of headers.
# So such packets are assumed to be signed messages.
if dec_start == 0 and body_len == -1:
found.add('signature')
found.add('unencrypted')
elif ptag < 0 or ptag > 19:
found = set()
return found
dec_start += hdr_len + body_len
skip = dec_start
if is_base64 and body_len != -1:
enc_start, enc_end, skip = self.base64_segment( dec_start,
dec_start + 6, 0, line_len, line_end )
segment = base64.b64decode(data[enc_start:enc_end])
if is_base64 and body_len != -1 and skip != len(segment):
# End of last packet does not match end of data.
found = set()
return found
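# Editor's illustration of sniff() (a hedged sketch; inputs hypothetical):
#
#   gpg = GnuPG(None)
#   gpg.sniff('-----BEGIN PGP MESSAGE-----\n...')
#   # -> set(['armored', 'encrypted'])
#   gpg.sniff(body_text, encoding='base64')
#   # -> e.g. set(['encrypted']) for a non-armored encrypted body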
def remove_armor(self, text):
lines = text.strip().splitlines(True)
if lines[0].startswith(self.ARMOR_BEGIN_SIGNED):
for idx in reversed(range(0, len(lines))):
if lines[idx].startswith(self.ARMOR_BEGIN_SIGNATURE):
lines = lines[:idx]
while lines and lines[0].strip():
lines.pop(0)
break
return ''.join(lines).strip()
def verify(self, data, signature=None):
"""
>>> g = GnuPG(None)
>>> s = g.sign("Hello, World", _from="smari@mailpile.is",
clearsign=True)[1]
>>> g.verify(s)
"""
params = ["--verify"]
if signature:
sig = tempfile.NamedTemporaryFile()
sig.write(signature)
sig.flush()
params.append(sig.name)
params.append("-")
self.event.running_gpg(_('Checking signature in %d bytes of data'
) % len(data))
ret, retvals = self.run(params, gpg_input=data, partial_read_ok=True)
rp = GnuPGResultParser(debug=self.debug)
return rp.parse([None, retvals]).signature_info
def encrypt(self, data, tokeys=[], armor=True,
sign=False, fromkey=None, throw_keyids=False):
"""
>>> g = GnuPG(None)
>>> g.encrypt("Hello, World", to=["smari@mailpile.is"])[0]
0
"""
if tokeys:
action = ["--encrypt", "--yes", "--expert",
"--trust-model", "always"]
for r in tokeys:
action.append("--recipient")
action.append(r)
action.extend([])
self.event.running_gpg(_('Encrypting %d bytes of data to %s'
) % (len(data), ', '.join(tokeys)))
else:
action = ["--symmetric", "--yes", "--expert"]
self.event.running_gpg(_('Encrypting %d bytes of data with password'
) % len(data))
if armor:
action.append("--armor")
if sign:
action.append("--sign")
if sign and fromkey:
action.append("--local-user")
action.append(fromkey)
if throw_keyids:
action.append("--throw-keyids")
if fromkey:
self.prepare_passphrase(fromkey, signing=True)
retvals = self.run(action, gpg_input=data,
send_passphrase=(sign or not tokeys))
return retvals[0], "".join(retvals[1]["stdout"])
def sign(self, data,
fromkey=None, armor=True, detach=True, clearsign=False,
passphrase=None):
"""
>>> g = GnuPG(None)
>>> g.sign("Hello, World", fromkey="smari@mailpile.is")[0]
0
"""
if passphrase is not None:
self.passphrase = passphrase.get_reader()
if fromkey and passphrase is None:
self.prepare_passphrase(fromkey, signing=True)
if detach and not clearsign:
action = ["--detach-sign"]
elif clearsign:
action = ["--clearsign"]
else:
action = ["--sign"]
if armor:
action.append("--armor")
if fromkey:
action.append("--local-user")
action.append(fromkey)
self.event.running_gpg(_('Signing %d bytes of data with %s'
) % (len(data), fromkey or _('default')))
retvals = self.run(action, gpg_input=data, send_passphrase=True)
self.passphrase = None
return retvals[0], "".join(retvals[1]["stdout"])
def sign_key(self, keyid, signingkey=None):
action = ["--yes", "--sign-key", keyid]
if signingkey:
action.insert(1, "-u")
action.insert(2, signingkey)
self.event.running_gpg(_('Signing key %s with %s'
) % (keyid, signingkey or _('default')))
retvals = self.run(action, send_passphrase=True)
return retvals
def delete_key(self, key_fingerprint):
cmd = ['--yes', '--delete-secret-and-public-key', key_fingerprint]
return self.run(cmd)
def recv_key(self, keyid,
keyservers=DEFAULT_KEYSERVERS,
keyserver_options=DEFAULT_KEYSERVER_OPTIONS):
self.event.running_gpg(_('Downloading key %s from key servers'
) % (keyid))
for keyserver in keyservers:
cmd = ['--keyserver', keyserver,
'--recv-key', self._escape_hex_keyid_term(keyid)]
for opt in keyserver_options:
cmd[2:2] = ['--keyserver-options', opt]
retvals = self.run(cmd)
if 'unsupported' not in ''.join(retvals[1]["stdout"]):
break
return self._parse_import(retvals[1]["status"])
def search_key(self, term,
keyservers=DEFAULT_KEYSERVERS,
keyserver_options=DEFAULT_KEYSERVER_OPTIONS):
self.event.running_gpg(_('Searching for key for %s in key servers'
) % (term))
for keyserver in keyservers:
cmd = ['--keyserver', keyserver,
'--fingerprint',
'--search-key', self._escape_hex_keyid_term(term)]
for opt in keyserver_options:
cmd[2:2] = ['--keyserver-options', opt]
retvals = self.run(cmd)
if 'unsupported' not in ''.join(retvals[1]["stdout"]):
break
results = {}
lines = [x.strip().split(":") for x in retvals[1]["stdout"]]
curpub = None
for line in lines:
if line[0] == "info":
pass
elif line[0] == "pub":
curpub = line[1]
validity = line[6]
if line[5]:
if int(line[5]) < time.time():
validity += 'e'
results[curpub] = {
"created": datetime.fromtimestamp(int(line[4])),
"keytype_name": _(openpgp_algorithms.get(int(line[2]),
'Unknown')),
"keysize": line[3],
"validity": validity,
"uids": [],
"fingerprint": curpub
}
elif line[0] == "uid":
email, name, comment = parse_uid(line[1])
results[curpub]["uids"].append({"name": name,
"email": email,
"comment": comment})
return results
def get_pubkey(self, keyid):
return self.export_pubkeys(selectors=[keyid])
def export_pubkeys(self, selectors=None):
self.event.running_gpg(_('Exporting keys %s from keychain'
) % (selectors,))
retvals = self.run(['--armor',
'--export'] + (selectors or [])
)[1]["stdout"]
return "".join(retvals)
def export_privkeys(self, selectors=None):
retvals = self.run(['--armor',
'--export-secret-keys'] + (selectors or [])
)[1]["stdout"]
return "".join(retvals)
def address_to_keys(self, address):
res = {}
keys = self.list_keys(selectors=[address])
for key, props in keys.iteritems():
if any([x["email"] == address for x in props["uids"]]):
res[key] = props
return res
def _escape_hex_keyid_term(self, term):
"""Prepends a 0x to hexadecimal key ids.
For example, D13C70DA is converted to 0xD13C70DA. This is required
by version 2.x of GnuPG (and is accepted by 1.x).
"""
is_hex_keyid = False
if len(term) == GPG_KEYID_LENGTH or len(term) == 2*GPG_KEYID_LENGTH:
hex_digits = set(string.hexdigits)
is_hex_keyid = all(c in hex_digits for c in term)
if is_hex_keyid:
return '0x%s' % term
else:
return term
def chat(self, gpg_args, callback, *args, **kwargs):
"""This lets a callback have a chat with the GPG process..."""
gpg_args = [self.gpgbinary,
"--utf8-strings",
# Disable SHA1 in all things GnuPG
"--personal-digest-preferences=SHA512",
"--digest-algo=SHA512",
"--cert-digest-algo=SHA512",
# We're not a human!
"--no-tty",
"--command-fd=0",
"--status-fd=1"] + (gpg_args or [])
if self.homedir:
gpg_args.insert(1, "--homedir=%s" % self.homedir)
if self.version_tuple() > (2, 1):
gpg_args.insert(2, "--pinentry-mode=loopback")
else:
gpg_args.insert(2, "--no-use-agent")
proc = None
try:
# Here we go!
self.debug('Running %s' % ' '.join(gpg_args))
self.event.update_args(gpg_args)
proc = Popen(gpg_args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
bufsize=0, long_running=True)
return callback(proc, *args, **kwargs)
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
if proc:
self.event.update_return_code(proc.wait())
else:
self.event.update_return_code(-1)
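# Editor's sketch of a chat() callback (hypothetical, not in the original
# file): chat() hands the callback a live Popen object whose stdout carries
# "[GNUPG:] ..." status lines and whose stdin is GPG's command fd.
#
#   def answer_prompts(proc):
#       for line in iter(proc.stdout.readline, ''):
#           if 'GET_BOOL' in line:   # gpg asks a yes/no question
#               proc.stdin.write('y\n')
#           elif 'GET_LINE' in line: # gpg wants a value; stop here
#               break
#       return True
#
#   GnuPG(None).chat(['--edit-key', keyid], answer_prompts)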
def GetKeys(gnupg, config, people):
keys = []
missing = []
ambig = []
# First, we go to the contact database and get a list of keys.
for person in set(people):
if '#' in person:
keys.append(person.rsplit('#', 1)[1])
else:
vcard = config.vcards.get_vcard(person)
if vcard:
# It is the VCard's job to give us the best key first.
lines = [vcl for vcl in vcard.get_all('KEY')
if vcl.value.startswith('data:application'
'/x-pgp-fingerprint,')]
if len(lines) > 0:
keys.append(lines[0].value.split(',', 1)[1])
else:
missing.append(person)
else:
missing.append(person)
# Load key data from gnupg for use below
if keys:
all_keys = gnupg.list_keys(selectors=keys)
else:
all_keys = {}
if missing:
# Keys are missing, so we try to just search the keychain
all_keys.update(gnupg.list_keys(selectors=missing))
found = []
for key_id, key in all_keys.iteritems():
for uid in key.get("uids", []):
if uid.get("email", None) in missing:
missing.remove(uid["email"])
found.append(uid["email"])
keys.append(key_id)
elif uid.get("email", None) in found:
ambig.append(uid["email"])
# Next, we go make sure all those keys are really in our keychain.
fprints = all_keys.keys()
for key in keys:
key = key.upper()
if key.startswith('0x'):
key = key[2:]
if key not in fprints:
match = [k for k in fprints if k.endswith(key)]
if len(match) == 0:
missing.append(key)
elif len(match) > 1:
ambig.append(key)
if missing:
raise KeyLookupError(_('Keys missing for %s'
) % ', '.join(missing), missing)
elif ambig:
ambig = list(set(ambig))
raise KeyLookupError(_('Keys ambiguous for %s'
) % ', '.join(ambig), ambig)
return keys
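# Editor's illustration of GetKeys() (hedged; addresses are hypothetical).
# A 'person#KEYID' suffix bypasses the VCard lookup entirely; otherwise the
# VCard's pgp-fingerprint KEY lines and then the keychain are consulted:
#
#   keys = GetKeys(GnuPG(config), config,
#                  ['alice@example.com', 'bob@example.com#DEADBEEF'])
#   # Raises KeyLookupError listing the addresses that matched no key,
#   # or that matched more than one.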
class OpenPGPMimeSigningWrapper(MimeSigningWrapper):
CONTAINER_PARAMS = (('micalg', 'pgp-sha512'),
('protocol', 'application/pgp-signature'))
SIGNATURE_TYPE = 'application/pgp-signature'
SIGNATURE_DESC = 'OpenPGP Digital Signature'
def crypto(self):
return GnuPG(self.config, event=self.event)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeEncryptingWrapper(MimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
# FIXME: Define _encrypt, allow throw_keyids
def crypto(self):
return GnuPG(self.config, event=self.event)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeSignEncryptWrapper(OpenPGPMimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
def crypto(self):
return GnuPG(self.config)
def _encrypt(self, message_text, tokeys=None, armor=False):
from_key = self.get_keys([self.sender])[0]
# FIXME: Allow throw_keyids here.
return self.crypto().encrypt(message_text,
tokeys=tokeys, armor=True,
sign=True, fromkey=from_key)
def _update_crypto_status(self, part):
part.signature_info.part_status = 'verified'
part.encryption_info.part_status = 'decrypted'
class GnuPGExpectScript(threading.Thread):
STARTUP = 'Startup'
START_GPG = 'Start GPG'
FINISHED = 'Finished'
SCRIPT = []
VARIABLES = {}
DESCRIPTION = 'GnuPG Expect Script'
RUNNING_STATES = [STARTUP, START_GPG]
DEFAULT_TIMEOUT = 60 # Infinite wait isn't desirable
def __init__(self, gnupg,
sps=None, event=None, variables={}, on_complete=None):
threading.Thread.__init__(self)
self.daemon = True
self._lock = threading.RLock()
self.before = ''
with self._lock:
self.state = self.STARTUP
self.gnupg = gnupg
self.event = event
# Copy here, so we never mutate the shared class-level VARIABLES dict.
self.variables = dict(variables or self.VARIABLES)
# Store (name, callback) pairs, matching on_complete() and run() below.
self._on_complete = [('on_complete', on_complete)] if on_complete else []
self.main_script = self.SCRIPT[:]
self.sps = sps
if sps:
self.variables['passphrase'] = '!!<SPS'
def __str__(self):
return '%s: %s' % (threading.Thread.__str__(self), self.state)
running = property(lambda self: (self.state in self.RUNNING_STATES))
failed = property(lambda self: False)
def in_state(self, state):
pass
def set_state(self, state):
self.state = state
self.in_state(state)
def sendline(self, proc, line):
if line == '!!<SPS':
reader = self.sps.get_reader()
while True:
c = reader.read()
if c != '':
proc.stdin.write(c)
else:
proc.stdin.write('\n')
break
else:
proc.stdin.write(line.encode('utf-8'))
proc.stdin.write('\n')
def _expecter(self, proc, exp, timebox):
while timebox[0] > 0:
self.before += proc.stdout.read(1)
if exp in self.before:
self.before = self.before.split(exp)[0]
return True
return False
def expect_exact(self, proc, exp, timeout=None):
from mailpile.util import RunTimed, TimedOut
timeout = timeout if (timeout and timeout > 0) else self.DEFAULT_TIMEOUT
timebox = [timeout]
self.before = ''
try:
self.gnupg.debug('Expect: %s' % exp)
if RunTimed(timeout, self._expecter, proc, exp, timebox):
return True
else:
raise TimedOut()
except TimedOut:
timebox[0] = 0
self.gnupg.debug('Timed out')
print 'Boo! %s not found in %s' % (exp, self.before)
raise
def run_script(self, proc, script):
for exp, rpl, tmo, state in script:
self.expect_exact(proc, exp, timeout=tmo)
if rpl:
self.sendline(proc, (rpl % self.variables).strip())
if state:
self.set_state(state)
def gpg_args(self):
return ['--no-use-agent', '--list-keys']
def run(self):
try:
self.set_state(self.START_GPG)
gpg = self.gnupg
gpg.event.running_gpg(_(self.DESCRIPTION) % self.variables)
gpg.chat(self.gpg_args(), self.run_script, self.main_script)
self.set_state(self.FINISHED)
except:
import traceback
traceback.print_exc()
finally:
with self._lock:
if self.state != self.FINISHED:
self.state = 'Failed: ' + self.state
for name, callback in self._on_complete:
callback()
self._on_complete = None
def on_complete(self, name, callback):
with self._lock:
if self._on_complete is not None:
if name not in [o[0] for o in self._on_complete]:
self._on_complete.append((name, callback))
else:
callback()
class GnuPGBaseKeyGenerator(GnuPGExpectScript):
"""This is a background thread which generates a new PGP key."""
AWAITING_LOCK = 'Pending keygen'
KEY_SETUP = 'Key Setup'
GATHER_ENTROPY = 'Creating key'
CREATED_KEY = 'Created key'
HAVE_KEY = 'Have Key'
VARIABLES = {
'keytype': '1',
'bits': '2048',
'name': 'Mailpile Generated Key',
'email': '',
'comment': 'www.mailpile.is',
'passphrase': 'mailpile'}
DESCRIPTION = _('Creating a %(bits)s bit GnuPG key')
RUNNING_STATES = (GnuPGExpectScript.RUNNING_STATES +
[AWAITING_LOCK, KEY_SETUP, GATHER_ENTROPY, HAVE_KEY])
failed = property(lambda self: (not self.running and
not self.generated_key))
def __init__(self, *args, **kwargs):
super(GnuPGBaseKeyGenerator, self).__init__(*args, **kwargs)
self.generated_key = None
def in_state(self, state):
if state == self.HAVE_KEY:
self.generated_key = self.before.strip().split()[-1]
def run(self):
# In order to minimize risk of timeout during key generation (due to
# lack of entropy), we serialize them here using a global lock
self.set_state(self.AWAITING_LOCK)
self.event.message = _('Waiting to generate a %s bit GnuPG key.'
                       ) % self.variables['bits']
with ENTROPY_LOCK:
self.event.data['keygen_gotlock'] = 1
self.event.message = _('Generating new %s bit PGP key.'
                       ) % self.variables['bits']
super(GnuPGBaseKeyGenerator, self).run()
class GnuPG14KeyGenerator(GnuPGBaseKeyGenerator):
"""This is the GnuPG 1.4x specific PGP key generation script."""
B = GnuPGBaseKeyGenerator
# FIXME: If GnuPG starts asking for things in a different order,
# we'll needlessly fail. To address this, we need to make
# the expect logic smarter. For now, we just assume the GnuPG
# team will be hesitant to change things.
SCRIPT = [
('GET_LINE keygen.algo', '%(keytype)s', -1, B.KEY_SETUP),
('GET_LINE keygen.size', '%(bits)s', -1, None),
('GET_LINE keygen.valid', '0', -1, None),
('GET_LINE keygen.name', '%(name)s', -1, None),
('GET_LINE keygen.email', '%(email)s', -1, None),
('GET_LINE keygen.comment', '%(comment)s', -1, None),
('GET_HIDDEN passphrase', '%(passphrase)s', -1, None),
('GOT_IT', None, -1, B.GATHER_ENTROPY),
('KEY_CREATED', None, 7200, B.CREATED_KEY),
('\n', None, -1, B.HAVE_KEY)]
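# Editor's note on the SCRIPT format above: each entry is a 4-tuple of
# (expect_string, reply_template_or_None, timeout, new_state_or_None).
# run_script() waits for expect_string on GPG's status output, optionally
# sends reply_template % self.variables on the command fd, and advances
# the state machine; a timeout of -1 means "use DEFAULT_TIMEOUT".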
def gpg_args(self):
return ['--no-use-agent', '--allow-freeform-uid', '--gen-key']
class GnuPG21KeyGenerator(GnuPG14KeyGenerator):
"""This is the GnuPG 2.1.x specific PGP key generation script."""
# Note: We don't use the nice --quick-generate-key function, because
# it won't let us generate a usable key with custom parameters in
# a single pass. So using the existing expect logic turns out to
# be less work in practice. Oh well.
def gpg_args(self):
# --yes should keep GnuPG from complaining if there already exists
# a key with this UID.
return ['--yes', '--allow-freeform-uid', '--full-gen-key']
class GnuPGDummyKeyGenerator(GnuPGBaseKeyGenerator):
"""A dummy key generator class, for incompatible versions of GnuPG."""
DESCRIPTION = _('Unable to create a %(bits)s bit key, wrong GnuPG version')
def __init__(self, *args, **kwargs):
GnuPGBaseKeyGenerator.__init__(self, *args, **kwargs)
self.generated_key = False
def run(self):
with self._lock:
self.gnupg.event.running_gpg(_(self.DESCRIPTION) % self.variables)
self.set_state(self.FINISHED)
for name, callback in self._on_complete:
callback()
self._on_complete = None
def GnuPGKeyGenerator(gnupg, **kwargs):
"""Return an instanciated generator, depending on GnuPG version."""
version = gnupg.version_tuple()
if version < (1, 5):
return GnuPG14KeyGenerator(gnupg, **kwargs)
elif version >= (2, 1):
return GnuPG21KeyGenerator(gnupg, **kwargs)
else:
return GnuPGDummyKeyGenerator(gnupg, **kwargs)
# Reset our translation variable
_ = gettext
## Include the SKS keyserver certificate here ##
KEYSERVER_CERTIFICATE="""
-----BEGIN CERTIFICATE-----
MIIFizCCA3OgAwIBAgIJAK9zyLTPn4CPMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV
BAYTAk5PMQ0wCwYDVQQIDARPc2xvMR4wHAYDVQQKDBVza3Mta2V5c2VydmVycy5u
ZXQgQ0ExHjAcBgNVBAMMFXNrcy1rZXlzZXJ2ZXJzLm5ldCBDQTAeFw0xMjEwMDkw
MDMzMzdaFw0yMjEwMDcwMDMzMzdaMFwxCzAJBgNVBAYTAk5PMQ0wCwYDVQQIDARP
c2xvMR4wHAYDVQQKDBVza3Mta2V5c2VydmVycy5uZXQgQ0ExHjAcBgNVBAMMFXNr
cy1rZXlzZXJ2ZXJzLm5ldCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
ggIBANdsWy4PXWNUCkS3L//nrd0GqN3dVwoBGZ6w94Tw2jPDPifegwxQozFXkG6I
6A4TK1CJLXPvfz0UP0aBYyPmTNadDinaB9T4jIwd4rnxl+59GiEmqkN3IfPsv5Jj
MkKUmJnvOT0DEVlEaO1UZIwx5WpfprB3mR81/qm4XkAgmYrmgnLXd/pJDAMk7y1F
45b5zWofiD5l677lplcIPRbFhpJ6kDTODXh/XEdtF71EAeaOdEGOvyGDmCO0GWqS
FDkMMPTlieLA/0rgFTcz4xwUYj/cD5e0ZBuSkYsYFAU3hd1cGfBue0cPZaQH2HYx
Qk4zXD8S3F4690fRhr+tki5gyG6JDR67aKp3BIGLqm7f45WkX1hYp+YXywmEziM4
aSbGYhx8hoFGfq9UcfPEvp2aoc8u5sdqjDslhyUzM1v3m3ZGbhwEOnVjljY6JJLx
MxagxnZZSAY424ZZ3t71E/Mn27dm2w+xFRuoy8JEjv1d+BT3eChM5KaNwrj0IO/y
u8kFIgWYA1vZ/15qMT+tyJTfyrNVV/7Df7TNeWyNqjJ5rBmt0M6NpHG7CrUSkBy9
p8JhimgjP5r0FlEkgg+lyD+V79H98gQfVgP3pbJICz0SpBQf2F/2tyS4rLm+49rP
fcOajiXEuyhpcmzgusAj/1FjrtlynH1r9mnNaX4e+rLWzvU5AgMBAAGjUDBOMB0G
A1UdDgQWBBTkwyoJFGfYTVISTpM8E+igjdq28zAfBgNVHSMEGDAWgBTkwyoJFGfY
TVISTpM8E+igjdq28zAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQAR
OXnYwu3g1ZjHyley3fZI5aLPsaE17cOImVTehC8DcIphm2HOMR/hYTTL+V0G4P+u
gH+6xeRLKSHMHZTtSBIa6GDL03434y9CBuwGvAFCMU2GV8w92/Z7apkAhdLToZA/
X/iWP2jeaVJhxgEcH8uPrnSlqoPBcKC9PrgUzQYfSZJkLmB+3jEa3HKruy1abJP5
gAdQvwvcPpvYRnIzUc9fZODsVmlHVFBCl2dlu/iHh2h4GmL4Da2rRkUMlbVTdioB
UYIvMycdOkpH5wJftzw7cpjsudGas0PARDXCFfGyKhwBRFY7Xp7lbjtU5Rz0Gc04
lPrhDf0pFE98Aw4jJRpFeWMjpXUEaG1cq7D641RpgcMfPFvOHY47rvDTS7XJOaUT
BwRjmDt896s6vMDcaG/uXJbQjuzmmx3W2Idyh3s5SI0GTHb0IwMKYb4eBUIpQOnB
cE77VnCYqKvN1NVYAqhWjXbY7XasZvszCRcOG+W3FqNaHOK/n/0ueb0uijdLan+U
f4p1bjbAox8eAOQS/8a3bzkJzdyBNUKGx1BIK2IBL9bn/HravSDOiNRSnZ/R3l9G
ZauX0tu7IIDlRCILXSyeazu0aj/vdT3YFQXPcvt5Fkf5wiNTo53f72/jYEJd6qph
WrpoKqrwGwTpRUCMhYIUt65hsTxCiJJ5nKe39h46sg==
-----END CERTIFICATE-----
"""
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_546_0 |
crossvul-python_data_bad_546_0 | #coding:utf-8
import os
import string
import sys
import time
import re
import StringIO
import tempfile
import threading
import traceback
import select
import pgpdump
import base64
import quopri
from datetime import datetime
from email.parser import Parser
from email.message import Message
from threading import Thread
import mailpile.platforms
from mailpile.i18n import gettext
from mailpile.i18n import ngettext as _n
from mailpile.crypto.state import *
from mailpile.crypto.mime import MimeSigningWrapper, MimeEncryptingWrapper
from mailpile.safe_popen import Popen, PIPE, Safe_Pipe
_ = lambda s: s
DEFAULT_KEYSERVERS = ["hkps://hkps.pool.sks-keyservers.net",
"hkp://subset.pool.sks-keyservers.net"]
DEFAULT_KEYSERVER_OPTIONS = [
'ca-cert-file=%s' % __file__.replace('.pyc', '.py')]
GPG_KEYID_LENGTH = 8
GNUPG_HOMEDIR = None # None=use what gpg uses
GPG_BINARY = mailpile.platforms.GetDefaultGnuPGCommand()
GPG_VERSIONS = {}
BLOCKSIZE = 65536
openpgp_algorithms = {1: _("RSA"),
2: _("RSA (encrypt only)"),
3: _("RSA (sign only)"),
16: _("ElGamal (encrypt only)"),
17: _("DSA"),
20: _("ElGamal (encrypt/sign) [COMPROMISED]"),
22: _("EdDSA"),
999: _("Unknown")}
# For details on the type 20 (ElGamal encrypt+sign) compromise, see
# http://lists.gnupg.org/pipermail/gnupg-announce/2003q4/000160.html
ENTROPY_LOCK = threading.Lock()
class GnuPGEventUpdater:
"""
Parse the GPG response into something useful for the Event Log.
"""
def __init__(self, event):
from mailpile.eventlog import Event
self.event = event or Event()
def _log(self, section, message):
data = section.get('gnupg', [])
if data:
data[-1].append(message)
def _log_private(self, message):
self._log(self.event.private_data, message)
def _log_public(self, message):
self._log(self.event.private_data, message)
self._log(self.event.data, message)
def running_gpg(self, why):
for section in (self.event.data, self.event.private_data):
data = section.get('gnupg', [])
data.append([why, int(time.time())])
section['gnupg'] = data
def update_args(self, args):
self._log_public(' '.join(args))
def update_sent_passphrase(self):
self._log_public('Sent passphrase')
def _parse_gpg_line(self, line):
if line.startswith('[GNUPG:] '):
pass # FIXME: Parse for machine-readable data
elif line.startswith('gpg: '):
self._log_private(line[5:].strip())
def update_stdout(self, line):
self._parse_gpg_line(line)
def update_stderr(self, line):
self._parse_gpg_line(line)
def update_return_code(self, code):
self._log_public('GnuPG returned %s' % code)
class GnuPGResultParser:
"""
Parse the GPG response into EncryptionInfo and SignatureInfo.
"""
def __init__(rp, decrypt_requires_MDC=True, debug=None):
rp.decrypt_requires_MDC = decrypt_requires_MDC
rp.debug = debug or (lambda t: True)
rp.signature_info = SignatureInfo()
rp.signature_info["protocol"] = "openpgp"
rp.encryption_info = EncryptionInfo()
rp.encryption_info["protocol"] = "openpgp"
rp.plaintext = ""
def parse(rp, retvals):
signature_info = rp.signature_info
encryption_info = rp.encryption_info
from mailpile.mailutils.emails import ExtractEmailAndName
# Belt & suspenders: work around some buggy GnuPG status codes
gpg_stderr = ''.join(retvals[1]["stderr"])
# First pass, set some initial state.
locked, missing = [], []
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == 'NEED_PASSPHRASE':
locked += [data[2]]
encryption_info.part_status = "lockedkey"
encryption_info["locked_keys"] = list(set(locked))
elif keyword == 'GOOD_PASSPHRASE':
encryption_info["locked_keys"] = []
elif keyword == "DECRYPTION_FAILED":
missing += [x[1].strip() for x in retvals[1]["status"]
if x[0] == "NO_SECKEY"]
if missing:
encryption_info["missing_keys"] = list(set(missing))
if encryption_info.part_status != "lockedkey":
if missing:
encryption_info.part_status = "missingkey"
else:
encryption_info.part_status = "error"
elif keyword == "DECRYPTION_OKAY":
if (rp.decrypt_requires_MDC and
'message was not integrity protected' in gpg_stderr):
rp.debug('Message not integrity protected, failing.')
encryption_info.part_status = "error"
else:
encryption_info.part_status = "decrypted"
rp.plaintext = "".join(retvals[1]["stdout"])
elif keyword == "ENC_TO":
keylist = encryption_info.get("have_keys", [])
if data[1] not in keylist:
keylist.append(data[1].strip())
encryption_info["have_keys"] = list(set(keylist))
elif keyword == "PLAINTEXT":
encryption_info.filename = data[3].strip()
elif signature_info.part_status == "none":
# Only one of these will ever be emitted per key, use
# this to set initial state. We may end up revising
# the status depending on more info later.
if keyword in ("GOODSIG", "BADSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "GOODSIG")
and "unverified"
or "invalid")
rp.plaintext = "".join(retvals[1]["stdout"])
elif keyword == "ERRSIG":
signature_info.part_status = "error"
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[5])
# Second pass, this may update/mutate the state set above
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == "NO_SECKEY":
keyid = data[1].strip()
if "missing_keys" not in encryption_info:
encryption_info["missing_keys"] = [keyid]
elif keyid not in encryption_info["missing_keys"]:
encryption_info["missing_keys"].append(keyid)
while keyid in encryption_info["have_keys"]:
encryption_info["have_keys"].remove(keyid)
elif keyword == "VALIDSIG":
# FIXME: Determine trust level, between new, unverified,
# verified, untrusted.
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[3])
elif keyword in ("EXPKEYSIG", "REVKEYSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "EXPKEYSIG")
and "expired"
or "revoked")
# FIXME: This appears to be spammy. Is my key borked, or
# is GnuPG being stupid?
#
# elif keyword == "KEYEXPIRED": # Ignoring: SIGEXPIRED
# signature_info.part_status = "expired"
elif keyword == "KEYREVOKED":
signature_info.part_status = "revoked"
elif keyword == "NO_PUBKEY":
signature_info.part_status = "unknown"
elif keyword in ("TRUST_ULTIMATE", "TRUST_FULLY"):
if signature_info.part_status == "unverified":
signature_info.part_status = "verified"
if encryption_info.part_status == "error":
rp.plaintext = ""
return rp
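# Editor's illustration of the parser (hedged; status tuples abridged and
# hypothetical, key IDs elided):
#
#   rp = GnuPGResultParser().parse([0, {
#       'stdout': ['Hello'], 'stderr': [],
#       'status': [['ENC_TO', 'KEYID', '1', '0'],
#                  ['DECRYPTION_OKAY'],
#                  ['GOODSIG', 'KEYID', 'Alice', '<alice@example.com>'],
#                  ['TRUST_FULLY']]}])
#   # rp.encryption_info.part_status == 'decrypted'
#   # rp.signature_info.part_status == 'verified'
#   # rp.plaintext == 'Hello'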
class GnuPGRecordParser:
def __init__(self):
self.keys = {}
self.curkeyid = None
self.curdata = None
self.record_fields = ["record", "validity", "keysize", "keytype",
"keyid", "creation_date", "expiration_date",
"uidhash", "ownertrust", "uid", "sigclass",
"capabilities", "flag", "sn", "hashtype",
"curve"]
self.record_types = ["pub", "sub", "ssb", "fpr", "uat", "sec", "tru",
"sig", "rev", "uid", "gpg", "rvk", "grp"]
self.record_parsers = [self.parse_pubkey, self.parse_subkey,
self.parse_subkey, self.parse_fingerprint,
self.parse_userattribute, self.parse_privkey,
self.parse_trust, self.parse_signature,
self.parse_revoke, self.parse_uidline,
self.parse_none, self.parse_revocation_key,
self.parse_keygrip]
self.dispatch = dict(zip(self.record_types, self.record_parsers))
def parse(self, lines):
for line in lines:
self.parse_line(line)
return self.keys
def parse_line(self, line):
line = dict(zip(self.record_fields,
map(lambda s: s.replace("\\x3a", ":"),
stubborn_decode(line).strip().split(":"))))
r = self.dispatch.get(line["record"], self.parse_unknown)
r(line)
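# Editor's illustration (hedged; a shortened, hypothetical --with-colons
# line). parse_line() zips the colon-separated fields against
# record_fields, so for
#   "pub:u:4096:1:C65626EED13C70DA:1387100000::::::escaESCA:"
# we get line["record"] == "pub", line["validity"] == "u",
# line["keysize"] == "4096", line["keytype"] == "1" (RSA),
# line["keyid"] == "C65626EED13C70DA", and dispatch routes the
# line to parse_pubkey().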
def _parse_dates(self, line):
for ts in ('expiration_date', 'creation_date'):
if line.get(ts) and '-' not in line[ts]:
try:
unixtime = int(line[ts])
if unixtime > 946684800: # 2000-01-01
dt = datetime.fromtimestamp(unixtime)
line[ts] = dt.strftime('%Y-%m-%d')
except ValueError:
line[ts+'_unparsed'] = line[ts]
line[ts] = '1970-01-01'
def _parse_keydata(self, line):
line["keytype_name"] = _(openpgp_algorithms.get(int(line["keytype"]),
'Unknown'))
line["capabilities_map"] = {
"encrypt": "E" in line["capabilities"],
"sign": "S" in line["capabilities"],
"certify": "C" in line["capabilities"],
"authenticate": "A" in line["capabilities"],
}
line["disabled"] = "D" in line["capabilities"]
line["revoked"] = "r" in line["validity"]
self._parse_dates(line)
return line
def _clean_curdata(self):
for v in self.curdata.keys():
if self.curdata[v] == "":
del self.curdata[v]
del self.curdata["record"]
def parse_pubkey(self, line):
self.curkeyid = line["keyid"]
self.curdata = self.keys[self.curkeyid] = self._parse_keydata(line)
self.curdata["subkeys"] = []
self.curdata["uids"] = []
self.curdata["secret"] = (self.curdata["record"] == "sec")
self.parse_uidline(self.curdata)
self._clean_curdata()
def parse_subkey(self, line):
self.curdata = self._parse_keydata(line)
self.keys[self.curkeyid]["subkeys"].append(self.curdata)
self._clean_curdata()
def parse_fingerprint(self, line):
fpr = line["uid"]
self.curdata["fingerprint"] = fpr
if len(self.curkeyid) < len(fpr):
self.keys[fpr] = self.keys[self.curkeyid]
del(self.keys[self.curkeyid])
self.curkeyid = fpr
def parse_userattribute(self, line):
# TODO: We are currently ignoring user attributes as not useful.
# We may at some point want to use --attribute-fd and read
# in user photos and such?
pass
def parse_privkey(self, line):
self.parse_pubkey(line)
def parse_uidline(self, line):
email, name, comment = parse_uid(line["uid"])
self._parse_dates(line)
if email or name or comment:
self.keys[self.curkeyid]["uids"].append({
"email": email,
"name": name,
"comment": comment,
"creation_date": line["creation_date"]
})
else:
pass # This is the case where a uid or sec line have no
# information aside from the creation date, which we
# parse elsewhere. As these lines are effectively blank,
# we omit them to simplify presentation to the user.
def parse_trust(self, line):
# FIXME: We are currently ignoring commentary from the Trust DB.
pass
def parse_signature(self, line):
# FIXME: This is probably wrong; signatures are on UIDs and not
# the key itself. No? Yes? Figure this out.
if "signatures" not in self.keys[self.curkeyid]:
self.keys[self.curkeyid]["signatures"] = []
# Use the field names assigned by parse_line (integer indexing would
# KeyError on the parsed dict); .get() tolerates short sig lines.
sig = {
    "signer": line.get("uid"),
    "signature_date": line.get("creation_date"),
    "keyid": line.get("keyid"),
    "trust": line.get("sigclass"),
    "keytype": line.get("keytype")
}
self.keys[self.curkeyid]["signatures"].append(sig)
def parse_keygrip(self, line):
self.curdata["keygrip"] = line["uid"]
def parse_revoke(self, line):
pass # FIXME
def parse_revocation_key(self, line):
pass # FIXME
def parse_unknown(self, line):
print "Unknown line with code '%s'" % (line,)
def parse_none(self, line):
    pass
UID_PARSE_RE = r"^([^\(\<]+?){0,1}( \((.+?)\)){0,1}( \<(.+?)\>){0,1}\s*$"
def stubborn_decode(text):
if isinstance(text, unicode):
return text
try:
return text.decode("utf-8")
except UnicodeDecodeError:
try:
return text.decode("iso-8859-1")
except UnicodeDecodeError:
return text.decode("utf-8", "replace")
def parse_uid(uidstr):
matches = re.match(UID_PARSE_RE, uidstr)
if matches:
email = matches.groups(0)[4] or ""
comment = matches.groups(0)[2] or ""
name = matches.groups(0)[0] or ""
else:
if '@' in uidstr and ' ' not in uidstr:
email, name = uidstr, ""
else:
email, name = "", uidstr
comment = ""
return email, name, comment
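def _parse_uid_example(): # Editor's sketch, not part of the original module
    # Demonstrates parse_uid() on a hypothetical uid string; the regex
    # splits "Name (Comment) <email>" into its three parts.
    email, name, comment = parse_uid(
        u'Alice Example (work) <alice@example.com>')
    assert (email, name, comment) == (u'alice@example.com',
                                      u'Alice Example', u'work')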
class StreamReader(Thread):
def __init__(self, name, fd, callback, lines=True):
Thread.__init__(self, target=self.readin, args=(fd, callback))
self.name = name
self.state = 'startup'
self.lines = lines
self.start()
def __str__(self):
return '%s(%s/%s, lines=%s)' % (Thread.__str__(self),
self.name, self.state, self.lines)
def readin(self, fd, callback):
try:
if self.lines:
self.state = 'read'
for line in iter(fd.readline, b''):
self.state = 'callback'
callback(line)
self.state = 'read'
else:
while True:
self.state = 'read'
buf = fd.read(BLOCKSIZE)
self.state = 'callback'
callback(buf)
if buf == "":
break
except:
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
class StreamWriter(Thread):
def __init__(self, name, fd, output, partial_write_ok=False):
Thread.__init__(self, target=self.writeout, args=(fd, output))
self.name = name
self.state = 'startup'
self.partial_write_ok = partial_write_ok
self.start()
def __str__(self):
return '%s(%s/%s)' % (Thread.__str__(self), self.name, self.state)
def writeout(self, fd, output):
if isinstance(output, (str, unicode)):
total = len(output)
output = StringIO.StringIO(output)
else:
total = 0
try:
while True:
self.state = 'read'
line = output.read(BLOCKSIZE)
if line == "":
break
self.state = 'write'
fd.write(line)
total -= len(line)
output.close()
except:
if not self.partial_write_ok:
print '%s: %s bytes left' % (self, total)
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
DEBUG_GNUPG = False
class GnuPG:
"""
Wrap GnuPG and make all functionality feel Pythonic.
"""
ARMOR_BEGIN_SIGNED = '-----BEGIN PGP SIGNED MESSAGE-----'
ARMOR_BEGIN_SIGNATURE = '-----BEGIN PGP SIGNATURE-----'
ARMOR_END_SIGNED = '-----END PGP SIGNATURE-----'
ARMOR_END_SIGNATURE = '-----END PGP SIGNATURE-----'
ARMOR_BEGIN_ENCRYPTED = '-----BEGIN PGP MESSAGE-----'
ARMOR_END_ENCRYPTED = '-----END PGP MESSAGE-----'
ARMOR_BEGIN_PUB_KEY = '-----BEGIN PGP PUBLIC KEY BLOCK-----'
ARMOR_END_PUB_KEY = '-----END PGP PUBLIC KEY BLOCK-----'
LAST_KEY_USED = 'DEFAULT' # This is a 1-value global cache
def __init__(self, config,
session=None, use_agent=None, debug=False, dry_run=False,
event=None, passphrase=None):
global DEBUG_GNUPG
self.available = None
self.outputfds = ["stdout", "stderr", "status"]
self.errors = []
self.event = GnuPGEventUpdater(event)
self.session = session
self.config = config or (session and session.config) or None
if self.config:
DEBUG_GNUPG = ('gnupg' in self.config.sys.debug)
self.homedir = self.config.sys.gpg_home or GNUPG_HOMEDIR
self.gpgbinary = self.config.sys.gpg_binary or GPG_BINARY
self.passphrases = self.config.passphrases
self.passphrase = (passphrase if (passphrase is not None) else
self.passphrases['DEFAULT']).get_reader()
self.use_agent = (use_agent if (use_agent is not None)
else self.config.prefs.gpg_use_agent)
else:
self.homedir = GNUPG_HOMEDIR
self.gpgbinary = GPG_BINARY
self.passphrases = None
if passphrase:
self.passphrase = passphrase.get_reader()
else:
self.passphrase = None
self.use_agent = use_agent
self.dry_run = dry_run
self.debug = (self._debug_all if (debug or DEBUG_GNUPG)
else self._debug_none)
def prepare_passphrase(self, keyid, signing=False, decrypting=False):
"""Query the Mailpile secrets for a usable passphrase."""
def _use(kid, sps_reader):
self.passphrase = sps_reader
GnuPG.LAST_KEY_USED = kid
return True
if self.config:
message = []
if decrypting:
message.append(_("Your PGP key is needed for decrypting."))
if signing:
message.append(_("Your PGP key is needed for signing."))
match, sps = self.config.get_passphrase(keyid,
prompt=_('Unlock your encryption key'),
description=' '.join(message))
if match:
return _use(match, sps.get_reader())
self.passphrase = None # This *may* allow use of the GnuPG agent
return False
def _debug_all(self, msg):
if self.session:
self.session.debug(msg.rstrip())
else:
print '%s' % str(msg).rstrip()
def _debug_none(self, msg):
pass
def set_home(self, path):
self.homedir = path
def version(self):
"""Returns a string representing the GnuPG version number."""
self.event.running_gpg(_('Checking GnuPG version'))
retvals = self.run(["--version"], novercheck=True)
return retvals[1]["stdout"][0].split('\n')[0]
def version_tuple(self, update=False):
"""Returns a tuple representing the GnuPG version number."""
global GPG_VERSIONS
if update or not GPG_VERSIONS.get(self.gpgbinary):
match = re.search(r"(\d+)\.(\d+)\.(\d+)", self.version())
version = tuple(int(v) for v in match.groups())
GPG_VERSIONS[self.gpgbinary] = version
return GPG_VERSIONS[self.gpgbinary]
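# Editor's note (hedged example): version() returns GPG's first output
# line, e.g. the hypothetical "gpg (GnuPG) 2.1.18", from which the regex
# above extracts the tuple (2, 1, 18).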
def gnupghome(self):
"""Returns the location of the GnuPG keyring"""
self.event.running_gpg(_('Checking GnuPG home directory'))
rv = self.run(["--version"], novercheck=True)[1]["stdout"][0]
for l in rv.splitlines():
if l.startswith('Home: '):
return os.path.expanduser(l[6:].strip())
return os.path.expanduser(os.getenv('GNUPGHOME', '~/.gnupg'))
def is_available(self):
try:
self.event.running_gpg(_('Checking GnuPG availability'))
self.version_tuple(update=True)
self.available = True
except OSError:
self.available = False
return self.available
def common_args(self, args=None, version=None, will_send_passphrase=False):
if args is None:
args = []
if version is None:
version = self.version_tuple()
args.insert(0, self.gpgbinary)
args.insert(1, "--utf8-strings")
args.insert(1, "--with-colons")
args.insert(1, "--verbose")
args.insert(1, "--batch")
args.insert(1, "--enable-progress-filter")
# Disable SHA1 in all things GnuPG
args[1:1] = ["--personal-digest-preferences=SHA512",
"--digest-algo=SHA512",
"--cert-digest-algo=SHA512"]
if (not self.use_agent) or will_send_passphrase:
if version < (1, 5):
args.insert(1, "--no-use-agent")
elif version > (2, 1, 11):
args.insert(1, "--pinentry-mode=loopback")
else:
raise ImportError('Mailpile requires GnuPG 1.4.x or 2.1.12+ !')
if self.homedir:
args.insert(1, "--homedir=%s" % self.homedir)
args.insert(1, "--status-fd=2")
if will_send_passphrase:
args.insert(2, "--passphrase-fd=0")
if self.dry_run:
args.insert(1, "--dry-run")
return args
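# Editor's illustration (hedged; assembled by tracing the inserts above,
# shown without a homedir): for GnuPG >= 2.1.12 with a passphrase pending,
# common_args(['--decrypt'], will_send_passphrase=True) yields roughly
#
#   [gpgbinary, '--status-fd=2', '--passphrase-fd=0',
#    '--pinentry-mode=loopback', '--personal-digest-preferences=SHA512',
#    '--digest-algo=SHA512', '--cert-digest-algo=SHA512',
#    '--enable-progress-filter', '--batch', '--verbose', '--with-colons',
#    '--utf8-strings', '--decrypt']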
def run(self,
args=None, gpg_input=None, outputfd=None, partial_read_ok=False,
send_passphrase=False, _raise=None, novercheck=False):
if novercheck:
version = (1, 4)
else:
version = self.version_tuple()
args = self.common_args(
args=list(args if args else []),
version=version,
will_send_passphrase=(self.passphrase and send_passphrase))
self.outputbuffers = dict([(x, []) for x in self.outputfds])
self.threads = {}
gpg_retcode = -1
proc = None
try:
if send_passphrase and (self.passphrase is None):
self.debug('Running WITHOUT PASSPHRASE %s' % ' '.join(args))
self.debug(''.join(traceback.format_stack()))
else:
self.debug('Running %s' % ' '.join(args))
# Here we go!
self.event.update_args(args)
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=0)
# GnuPG is a bit crazy, and requires that the passphrase
# be sent and the filehandle closed before anything else
# interesting happens.
if send_passphrase and self.passphrase is not None:
self.passphrase.seek(0, 0)
c = self.passphrase.read(BLOCKSIZE)
while c != '':
proc.stdin.write(c)
c = self.passphrase.read(BLOCKSIZE)
proc.stdin.write('\n')
self.event.update_sent_passphrase()
wtf = ' '.join(args)
self.threads = {
"stderr": StreamReader('gpgi-stderr(%s)' % wtf,
proc.stderr, self.parse_stderr)
}
if outputfd:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-to-fd(%s)' % wtf,
proc.stdout, outputfd.write, lines=False)
else:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-parsed(%s)' % wtf,
proc.stdout, self.parse_stdout)
if gpg_input:
# If we have output, we just stream it. Technically, this
# doesn't really need to be a thread at the moment.
self.debug('<<STDOUT<< %s' % gpg_input)
StreamWriter('gpgi-output(%s)' % wtf,
proc.stdin, gpg_input,
partial_write_ok=partial_read_ok).join()
else:
proc.stdin.close()
# Reap GnuPG
gpg_retcode = proc.wait()
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
# Update event with return code
self.event.update_return_code(gpg_retcode)
# Reap the threads
self._reap_threads()
if outputfd:
outputfd.close()
if gpg_retcode != 0 and _raise:
raise _raise('GnuPG failed, exit code: %s' % gpg_retcode)
return gpg_retcode, self.outputbuffers
def _reap_threads(self):
for tries in (1, 2, 3):
for name, thr in self.threads.iteritems():
if thr.isAlive():
thr.join(timeout=15)
if thr.isAlive() and tries > 1:
print 'WARNING: Failed to reap thread %s' % thr
def parse_status(self, line, *args):
self.debug('<<STATUS<< %s' % line)
line = line.replace("[GNUPG:] ", "")
if line == "":
return
elems = line.split(" ")
self.outputbuffers["status"].append(elems)
def parse_stdout(self, line):
self.event.update_stdout(line)
self.debug('<<STDOUT<< %s' % line)
self.outputbuffers["stdout"].append(line)
def parse_stderr(self, line):
self.event.update_stderr(line)
if line.startswith("[GNUPG:] "):
return self.parse_status(line)
self.debug('<<STDERR<< %s' % line)
self.outputbuffers["stderr"].append(line)
def parse_keylist(self, keylist):
rlp = GnuPGRecordParser()
return rlp.parse(keylist)
def list_keys(self, selectors=None):
"""
>>> g = GnuPG(None)
>>> g.list_keys()[0]
0
"""
list_keys = ["--fingerprint"]
for sel in set(selectors or []):
list_keys += ["--list-keys", sel]
if not selectors:
list_keys += ["--list-keys"]
self.event.running_gpg(_('Fetching GnuPG public key list (selectors=%s)'
) % ', '.join(selectors or []))
retvals = self.run(list_keys)
return self.parse_keylist(retvals[1]["stdout"])
def list_secret_keys(self, selectors=None):
#
# Note: The selectors that are passed by default work around a bug
# in GnuPG < 2.1, where --list-secret-keys does not list
# details about key capabilities or expiry for
# --list-secret-keys unless a selector is provided. A dot
# is reasonably likely to appear in all PGP keys, as it is
# a common component of e-mail addresses (and @ does not
# work as a selector for some reason...)
#
# The downside of this workaround is that keys with no e-mail
# address or an address like alice@localhost won't be found.
# So we disable this hack on GnuPG >= 2.1.
#
if not selectors and self.version_tuple() < (2, 1):
selectors = [".", "a", "e", "i", "p", "t", "k"]
list_keys = ["--fingerprint"]
if selectors:
for sel in selectors:
list_keys += ["--list-secret-keys", sel]
else:
list_keys += ["--list-secret-keys"]
self.event.running_gpg(_('Fetching GnuPG secret key list (selectors=%s)'
) % ', '.join(selectors or ['None']))
retvals = self.run(list_keys)
secret_keys = self.parse_keylist(retvals[1]["stdout"])
# Another unfortunate thing GPG does, is it hides the disabled
# state when listing secret keys; it seems internally only the
# public key is disabled. This makes it hard for us to reason about
# which keys can actually be used, so we compensate...
list_keys = ["--fingerprint"]
for fprint in set(secret_keys):
list_keys += ["--list-keys", fprint]
retvals = self.run(list_keys)
public_keys = self.parse_keylist(retvals[1]["stdout"])
for fprint, info in public_keys.iteritems():
if fprint in set(secret_keys):
for k in ("disabled", "revoked"): # FIXME: Copy more?
secret_keys[fprint][k] = info[k]
return secret_keys
def import_keys(self, key_data=None):
"""
Imports gpg keys from a file object or string.
>>> key_data = open("testing/pub.key").read()
>>> g = GnuPG(None)
>>> g.import_keys(key_data)
{'failed': [], 'updated': [{'details_text': 'unchanged', 'details': 0, 'fingerprint': '08A650B8E2CBC1B02297915DC65626EED13C70DA'}], 'imported': [], 'results': {'sec_dups': 0, 'unchanged': 1, 'num_uids': 0, 'skipped_new_keys': 0, 'no_userids': 0, 'num_signatures': 0, 'num_revoked': 0, 'sec_imported': 0, 'sec_read': 0, 'not_imported': 0, 'count': 1, 'imported_rsa': 0, 'imported': 0, 'num_subkeys': 0}}
"""
self.event.running_gpg(_('Importing key to GnuPG key chain'))
retvals = self.run(["--import"], gpg_input=key_data)
return self._parse_import(retvals[1]["status"])
def _parse_import(self, output):
res = {"imported": [], "updated": [], "failed": []}
for x in output:
if x[0] == "IMPORTED":
res["imported"].append({
"fingerprint": x[1],
"username": x[2].rstrip()
})
elif x[0] == "IMPORT_OK":
reasons = {
"0": "unchanged",
"1": "new key",
"2": "new user IDs",
"4": "new signatures",
"8": "new subkeys",
"16": "contains private key",
"17": "contains new private key",
}
res["updated"].append({
"details": int(x[1]),
# FIXME: Reasons may be ORed! This does NOT handle that.
"details_text": reasons.get(x[1], str(x[1])),
"fingerprint": x[2].rstrip(),
})
elif x[0] == "IMPORT_PROBLEM":
reasons = {
"0": "no reason given",
"1": "invalid certificate",
"2": "issuer certificate missing",
"3": "certificate chain too long",
"4": "error storing certificate",
}
res["failed"].append({
"details": int(x[1]),
"details_text": reasons.get(x[1], str(x[1])),
"fingerprint": x[2].rstrip()
})
elif x[0] == "IMPORT_RES":
res["results"] = {
"count": int(x[1]),
"no_userids": int(x[2]),
"imported": int(x[3]),
"imported_rsa": int(x[4]),
"unchanged": int(x[5]),
"num_uids": int(x[6]),
"num_subkeys": int(x[7]),
"num_signatures": int(x[8]),
"num_revoked": int(x[9]),
"sec_read": int(x[10]),
"sec_imported": int(x[11]),
"sec_dups": int(x[12]),
"skipped_new_keys": int(x[13]),
"not_imported": int(x[14].rstrip()),
}
return res
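# Editor's illustration of _parse_import() (hedged; status tuples are
# hypothetical and the fingerprint is elided):
#
#   _parse_import([['IMPORTED', 'FPR', 'Alice <alice@example.com>\n'],
#                  ['IMPORT_OK', '1', 'FPR\n']])
#   # -> {'imported': [{'fingerprint': 'FPR',
#   #                   'username': 'Alice <alice@example.com>'}],
#   #     'updated': [{'details': 1, 'details_text': 'new key',
#   #                  'fingerprint': 'FPR'}],
#   #     'failed': []}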
def decrypt(self, data,
outputfd=None, passphrase=None, as_lines=False, require_MDC=True):
"""
Note that this test will fail if you don't replace the recipient with
one whose key you control.
>>> g = GnuPG(None)
>>> ct = g.encrypt("Hello, World", tokeys=["smari@mailpile.is"])[1]
>>> g.decrypt(ct)["text"]
'Hello, World'
"""
if passphrase is not None:
self.passphrase = passphrase.get_reader()
elif GnuPG.LAST_KEY_USED:
# This is an opportunistic approach to passphrase usage... we
# just hope the passphrase we used last time will work again.
# If we are right, we are done. If we are wrong, the output
# will tell us which key IDs to look for in our secret stash.
self.prepare_passphrase(GnuPG.LAST_KEY_USED, decrypting=True)
self.event.running_gpg(_('Decrypting %d bytes of data') % len(data))
for tries in (1, 2):
retvals = self.run(["--decrypt"], gpg_input=data,
outputfd=outputfd,
send_passphrase=True)
if tries == 1:
keyid = None
for msg in reversed(retvals[1]['status']):
# Reverse order so DECRYPTION_OKAY overrides KEY_CONSIDERED.
# If decryption is not ok, look for good passphrase, retry.
if msg[0] == 'DECRYPTION_OKAY':
break
elif (msg[0] == 'NEED_PASSPHRASE') and (passphrase is None):
# This message is output by gpg 1.4 but not 2.1.
if self.prepare_passphrase(msg[2], decrypting=True):
keyid = msg[2]
break
elif (msg[0] == 'KEY_CONSIDERED') and (passphrase is None):
# This message is output by gpg 2.1 but not 1.4.
if self.prepare_passphrase(msg[1], decrypting=True):
keyid = msg[1]
break
if not keyid:
break
if as_lines:
as_lines = retvals[1]["stdout"]
retvals[1]["stdout"] = []
rp = GnuPGResultParser(decrypt_requires_MDC=require_MDC,
debug=self.debug).parse(retvals)
return (rp.signature_info, rp.encryption_info,
as_lines or rp.plaintext)
def base64_segment(self, dec_start, dec_end, skip, line_len, line_end = 2):
"""
Given the start and end index of a desired segment of decoded data,
this function finds the smallest segment of an encoded base64 array
that, when decoded, will include the desired decoded segment.
It's assumed that the base64 data has a uniform line structure of
line_len encoded characters including line_end eol characters,
and that there are skip header characters preceding the base64 data.
"""
enc_start = 4*(dec_start/3)
dec_skip = dec_start - 3*enc_start/4
enc_start += line_end*(enc_start/(line_len-line_end))
enc_end = 4*(dec_end/3)
enc_end += line_end*(enc_end/(line_len-line_end))
return enc_start, enc_end, dec_skip
def pgp_packet_hdr_parse(self, header, prev_partial = False):
"""
Parse the header of a PGP packet to get the packet type, header length,
and data length. Extra trailing characters in header are ignored.
prev_partial indicates that the previous packet was a partial packet.
An illegal header returns type -1, lengths 0.
Header format is defined in RFC4880 section 4.
"""
hdr = bytearray(header.ljust( 6, chr(0)))
if not prev_partial:
hdr_len = 1
else:
hdr[1:] = hdr # Partial block headers don't have a tag
hdr[0] = 0 # Insert a dummy tag.
hdr_len = 0
is_partial = False
if prev_partial or (hdr[0] & 0xC0) == 0xC0:
# New format packet
ptag = hdr[0] & 0x3F
body_len = hdr[1]
lengthtype = 0
hdr_len += 1
if body_len < 192:
pass
elif body_len <= 223:
hdr_len += 1
body_len = ((body_len - 192) << 8) + hdr[2] + 192
elif body_len == 255:
hdr_len += 4
body_len = ( (hdr[2] << 24) + (hdr[3] << 16) +
(hdr[4] << 8) + hdr[5] )
else:
# Partial packet headers are only legal for data packets.
if not prev_partial and ptag not in {8, 9, 11, 18}:
return (-1, 0, 0, False)
# Could do extra testing here.
is_partial = True
body_len = 1 << (hdr[1] & 0x1F)
elif (hdr[0] & 0xC0) == 0x80:
# Old format packet
ptag = (hdr[0] & 0x3C) >> 2
lengthtype = hdr[0] & 0x03
if lengthtype < 3:
hdr_len = 2
body_len = hdr[1]
if lengthtype > 0:
hdr_len = 3
body_len = (body_len << 8) + hdr[2]
if lengthtype > 1:
hdr_len = 5
body_len = (
(body_len << 16) + (hdr[3] << 8) + hdr[4] )
else:
# Kludgy extra test for compressed packets w/ "unknown" length
# gpg generates these in signed-only files. Check for valid
# compression algorithm id to minimize false positives.
if ptag != 8 or (hdr[1] < 1 or hdr[1] > 3):
return (-1, 0, 0, False)
hdr_len = 1
body_len = -1
else:
return (-1, 0, 0, False)
if hdr_len > len(header):
return (-1, 0, 0, False)
return ptag, hdr_len, body_len, is_partial
def sniff(self, data, encoding = None):
"""
Checks arbitrary data to see if it is a PGP object and returns a set
that indicates the kind(s) of object found. The names of the set
elements are based on RFC3156 content types with 'pgp-' stripped so
they can be used in sniffers for other protocols, e.g. S/MIME.
There are additional set elements 'armored' and 'unencrypted'.
This code should give no false negatives, but may give false positives.
For efficient handling of encoded data, only small segments are decoded.
Armored files are detected by their armor header alone.
Non-armored data is detected by looking for a sequence of valid PGP
packet headers.
"""
found = set()
is_base64 = False
is_quopri = False
line_len = 0
line_end = 1
enc_start = 0
enc_end = 0
dec_start = 0
skip = 0
ptag = 0
hdr_len = 0
body_len = 0
partial = False
offset_enc = 0
offset_dec = 0
offset_packet = 0
# Identify encoding and base64 line length.
if encoding and encoding.lower() == 'base64':
line_len = data.find('\n') + 1 # Assume uniform length
if line_len < 1: # find() returned -1: no newline, one long line
    line_len = len(data)
elif line_len > 1 and data[line_len-2] == '\r':
line_end = 2
if line_len - line_end > 76: # Maximum per RFC2045 6.8
return found
enc_end = line_len
try:
segment = base64.b64decode(data[enc_start:enc_end])
except TypeError:
return found
is_base64 = True
elif encoding and encoding.lower() == 'quoted-printable':
# Can't selectively decode quopri because encoded length is data
# dependent due to escapes! Just decode one medium length segment.
# This is enough to contain the first few packets of a long file.
try:
segment = quopri.decodestring(data[0:1500])
except TypeError:
return found # *** ? Docs don't list exceptions
is_quopri = True
else:
line_len = len(data)
segment = data # *** Shallow copy?
if not segment:
found = set()
elif not (ord(segment[0]) & 0x80):
# Not a PGP packet header if MSbit is 0. Check for armoured data.
found.add('armored')
if segment.startswith(self.ARMOR_BEGIN_SIGNED):
# Clearsigned
found.add('unencrypted')
found.add('signature')
elif segment.startswith(self.ARMOR_BEGIN_SIGNATURE):
# Detached signature
found.add('signature')
elif segment.startswith(self.ARMOR_BEGIN_ENCRYPTED):
# PGP uses the same armor header for encrypted and signed only
# Fortunately gpg --decrypt handles both!
found.add('encrypted')
elif segment.startswith(self.ARMOR_BEGIN_PUB_KEY):
found.add('key')
else:
found = set()
else:
# Could be PGP packet header. Check for sequence of legal headers.
while skip < len(segment) and body_len != -1:
# Check this packet header.
prev_partial = partial
ptag, hdr_len, body_len, partial = (
self.pgp_packet_hdr_parse(segment[skip:], prev_partial) )
if prev_partial or partial:
pass
elif ptag == 11:
found.add('unencrypted') # Literal Data
elif ptag == 1:
found.add('encrypted') # Encrypted Session Key
elif ptag == 9:
found.add('encrypted') # Symmetrically Encrypted Data
elif ptag == 18:
found.add('encrypted') # Symmetrically Encrypted & MDC
elif ptag == 2:
found.add('signature') # Signature
elif ptag == 4:
found.add('signature') # One-Pass Signature
elif ptag == 6:
found.add('key') # Public Key
elif ptag == 14:
found.add('key') # Public Subkey
elif ptag == 8: # Compressed Data Packet
# This is a kludge. Signed, non-encrypted files made by gpg
# (but no other gpg files) consist of one compressed data
# packet of unknown length which contains the signature
# and data packets.
# This appears to be an interpretation of RFC4880 2.3.
# The compression prevents selective parsing of headers.
# So such packets are assumed to be signed messages.
if dec_start == 0 and body_len == -1:
found.add('signature')
found.add('unencrypted')
elif ptag < 0 or ptag > 19:
found = set()
return found
dec_start += hdr_len + body_len
skip = dec_start
if is_base64 and body_len != -1:
enc_start, enc_end, skip = self.base64_segment( dec_start,
dec_start + 6, 0, line_len, line_end )
segment = base64.b64decode(data[enc_start:enc_end])
if is_base64 and body_len != -1 and skip != len(segment):
# End of last packet does not match end of data.
found = set()
return found
def remove_armor(self, text):
lines = text.strip().splitlines(True)
if lines[0].startswith(self.ARMOR_BEGIN_SIGNED):
for idx in reversed(range(0, len(lines))):
if lines[idx].startswith(self.ARMOR_BEGIN_SIGNATURE):
lines = lines[:idx]
while lines and lines[0].strip():
lines.pop(0)
break
return ''.join(lines).strip()
def verify(self, data, signature=None):
"""
>>> g = GnuPG(None)
>>> s = g.sign("Hello, World", fromkey="smari@mailpile.is",
...            clearsign=True)[1]
>>> g.verify(s)
"""
params = ["--verify"]
if signature:
sig = tempfile.NamedTemporaryFile()
sig.write(signature)
sig.flush()
params.append(sig.name)
params.append("-")
self.event.running_gpg(_('Checking signature in %d bytes of data'
) % len(data))
ret, retvals = self.run(params, gpg_input=data, partial_read_ok=True)
rp = GnuPGResultParser(debug=self.debug)
return rp.parse([None, retvals]).signature_info
def encrypt(self, data, tokeys=[], armor=True,
sign=False, fromkey=None, throw_keyids=False):
"""
>>> g = GnuPG(None)
>>> g.encrypt("Hello, World", tokeys=["smari@mailpile.is"])[0]
0
"""
if tokeys:
action = ["--encrypt", "--yes", "--expert",
"--trust-model", "always"]
for r in tokeys:
action.append("--recipient")
action.append(r)
action.extend([])
self.event.running_gpg(_('Encrypting %d bytes of data to %s'
) % (len(data), ', '.join(tokeys)))
else:
action = ["--symmetric", "--yes", "--expert"]
self.event.running_gpg(_('Encrypting %d bytes of data with password'
) % len(data))
if armor:
action.append("--armor")
if sign:
action.append("--sign")
if sign and fromkey:
action.append("--local-user")
action.append(fromkey)
if throw_keyids:
action.append("--throw-keyids")
if fromkey:
self.prepare_passphrase(fromkey, signing=True)
retvals = self.run(action, gpg_input=data,
send_passphrase=(sign or not tokeys))
return retvals[0], "".join(retvals[1]["stdout"])
def sign(self, data,
fromkey=None, armor=True, detach=True, clearsign=False,
passphrase=None):
"""
>>> g = GnuPG(None)
>>> g.sign("Hello, World", fromkey="smari@mailpile.is")[0]
0
"""
if passphrase is not None:
self.passphrase = passphrase.get_reader()
if fromkey and passphrase is None:
self.prepare_passphrase(fromkey, signing=True)
if detach and not clearsign:
action = ["--detach-sign"]
elif clearsign:
action = ["--clearsign"]
else:
action = ["--sign"]
if armor:
action.append("--armor")
if fromkey:
action.append("--local-user")
action.append(fromkey)
self.event.running_gpg(_('Signing %d bytes of data with %s'
) % (len(data), fromkey or _('default')))
retvals = self.run(action, gpg_input=data, send_passphrase=True)
self.passphrase = None
return retvals[0], "".join(retvals[1]["stdout"])
def sign_key(self, keyid, signingkey=None):
action = ["--yes", "--sign-key", keyid]
if signingkey:
action.insert(1, "-u")
action.insert(2, signingkey)
self.event.running_gpg(_('Signing key %s with %s'
) % (keyid, signingkey or _('default')))
retvals = self.run(action, send_passphrase=True)
return retvals
def delete_key(self, key_fingerprint):
cmd = ['--yes', '--delete-secret-and-public-key', key_fingerprint]
return self.run(cmd)
def recv_key(self, keyid,
keyservers=DEFAULT_KEYSERVERS,
keyserver_options=DEFAULT_KEYSERVER_OPTIONS):
self.event.running_gpg(_('Downloading key %s from key servers'
) % (keyid))
for keyserver in keyservers:
cmd = ['--keyserver', keyserver,
'--recv-key', self._escape_hex_keyid_term(keyid)]
for opt in keyserver_options:
cmd[2:2] = ['--keyserver-options', opt]
retvals = self.run(cmd)
if 'unsupported' not in ''.join(retvals[1]["stdout"]):
break
return self._parse_import(retvals[1]["status"])
def search_key(self, term,
keyservers=DEFAULT_KEYSERVERS,
keyserver_options=DEFAULT_KEYSERVER_OPTIONS):
self.event.running_gpg(_('Searching for key for %s in key servers'
) % (term))
for keyserver in keyservers:
cmd = ['--keyserver', keyserver,
'--fingerprint',
'--search-key', self._escape_hex_keyid_term(term)]
for opt in keyserver_options:
cmd[2:2] = ['--keyserver-options', opt]
retvals = self.run(cmd)
if 'unsupported' not in ''.join(retvals[1]["stdout"]):
break
results = {}
lines = [x.strip().split(":") for x in retvals[1]["stdout"]]
curpub = None
for line in lines:
if line[0] == "info":
pass
elif line[0] == "pub":
curpub = line[1]
validity = line[6]
if line[5]:
if int(line[5]) < time.time():
validity += 'e'
results[curpub] = {
"created": datetime.fromtimestamp(int(line[4])),
"keytype_name": _(openpgp_algorithms.get(int(line[2]),
'Unknown')),
"keysize": line[3],
"validity": validity,
"uids": [],
"fingerprint": curpub
}
elif line[0] == "uid":
email, name, comment = parse_uid(line[1])
results[curpub]["uids"].append({"name": name,
"email": email,
"comment": comment})
return results
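# A minimal sketch of the colon-delimited records the loop above consumes
# (gpg's machine-readable output; field meanings inferred from the indexes
# used above, sample values invented):
def _sketch_parse_pub_record(line):
    fields = line.strip().split(':')
    return {'fingerprint': fields[1],          # line[1] above
            'algorithm': int(fields[2]),       # line[2] above
            'keysize': fields[3],              # line[3] above
            'created': int(fields[4]),         # line[4] above
            'expires': int(fields[5]) if fields[5] else None,
            'validity': fields[6]}             # line[6] above

assert _sketch_parse_pub_record(
    'pub:ABCD1234:1:4096:1500000000::f')['keysize'] == '4096'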
def get_pubkey(self, keyid):
return self.export_pubkeys(selectors=[keyid])
def export_pubkeys(self, selectors=None):
self.event.running_gpg(_('Exporting keys %s from keychain'
) % (selectors,))
retvals = self.run(['--armor',
'--export'] + (selectors or [])
)[1]["stdout"]
return "".join(retvals)
def export_privkeys(self, selectors=None):
retvals = self.run(['--armor',
'--export-secret-keys'] + (selectors or [])
)[1]["stdout"]
return "".join(retvals)
def address_to_keys(self, address):
res = {}
keys = self.list_keys(selectors=[address])
for key, props in keys.iteritems():
if any([x["email"] == address for x in props["uids"]]):
res[key] = props
return res
def _escape_hex_keyid_term(self, term):
"""Prepends a 0x to hexadecimal key ids.
For example, D13C70DA is converted to 0xD13C70DA. This is required
by version 2.x of GnuPG (and is accepted by 1.x).
"""
is_hex_keyid = False
if len(term) == GPG_KEYID_LENGTH or len(term) == 2*GPG_KEYID_LENGTH:
hex_digits = set(string.hexdigits)
is_hex_keyid = all(c in hex_digits for c in term)
if is_hex_keyid:
return '0x%s' % term
else:
return term
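# A hedged, standalone restatement of the check above, with usage examples.
# GPG_KEYID_LENGTH is defined elsewhere in this module; 8 hex digits (the
# classic short key id) is assumed here for illustration:
def _sketch_escape_hex(term, keyid_len=8):
    import string  # stdlib; the module above already imports it
    if (len(term) in (keyid_len, 2 * keyid_len)
            and all(c in string.hexdigits for c in term)):
        return '0x%s' % term
    return term

assert _sketch_escape_hex('D13C70DA') == '0xD13C70DA'
assert _sketch_escape_hex('user@example.com') == 'user@example.com'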
def chat(self, gpg_args, callback, *args, **kwargs):
"""This lets a callback have a chat with the GPG process..."""
gpg_args = [self.gpgbinary,
"--utf8-strings",
# Disable SHA1 in all things GnuPG
"--personal-digest-preferences=SHA512",
"--digest-algo=SHA512",
"--cert-digest-algo=SHA512",
# We're not a human!
"--no-tty",
"--command-fd=0",
"--status-fd=1"] + (gpg_args or [])
if self.homedir:
gpg_args.insert(1, "--homedir=%s" % self.homedir)
if self.version_tuple() > (2, 1):
gpg_args.insert(2, "--pinentry-mode=loopback")
else:
gpg_args.insert(2, "--no-use-agent")
proc = None
try:
# Here we go!
self.debug('Running %s' % ' '.join(gpg_args))
self.event.update_args(gpg_args)
proc = Popen(gpg_args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
bufsize=0, long_running=True)
return callback(proc, *args, **kwargs)
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
if proc:
self.event.update_return_code(proc.wait())
else:
self.event.update_return_code(-1)
def GetKeys(gnupg, config, people):
keys = []
missing = []
ambig = []
# First, we go to the contact database and get a list of keys.
for person in set(people):
if '#' in person:
keys.append(person.rsplit('#', 1)[1])
else:
vcard = config.vcards.get_vcard(person)
if vcard:
# It is the VCard's job to give us the best key first.
lines = [vcl for vcl in vcard.get_all('KEY')
if vcl.value.startswith('data:application'
'/x-pgp-fingerprint,')]
if len(lines) > 0:
keys.append(lines[0].value.split(',', 1)[1])
else:
missing.append(person)
else:
missing.append(person)
# Load key data from gnupg for use below
if keys:
all_keys = gnupg.list_keys(selectors=keys)
else:
all_keys = {}
if missing:
# Keys are missing, so we try to just search the keychain
all_keys.update(gnupg.list_keys(selectors=missing))
found = []
for key_id, key in all_keys.iteritems():
for uid in key.get("uids", []):
if uid.get("email", None) in missing:
missing.remove(uid["email"])
found.append(uid["email"])
keys.append(key_id)
elif uid.get("email", None) in found:
ambig.append(uid["email"])
# Next, we go make sure all those keys are really in our keychain.
fprints = all_keys.keys()
for key in keys:
key = key.upper()
if key.startswith('0X'):  # upper() above also upper-cases a '0x' prefix
    key = key[2:]
if key not in fprints:
match = [k for k in fprints if k.endswith(key)]
if len(match) == 0:
missing.append(key)
elif len(match) > 1:
ambig.append(key)
if missing:
raise KeyLookupError(_('Keys missing for %s'
) % ', '.join(missing), missing)
elif ambig:
ambig = list(set(ambig))
raise KeyLookupError(_('Keys ambiguous for %s'
) % ', '.join(ambig), ambig)
return keys
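# A hedged sketch of the suffix matching used above: user-supplied key ids
# are matched against the tail of full fingerprints (values invented):
def _sketch_match_key(wanted, fingerprints):
    wanted = wanted.upper()
    if wanted.startswith('0X'):  # upper-cased '0x' prefix
        wanted = wanted[2:]
    return [fp for fp in fingerprints if fp.endswith(wanted)]

_fps = ['AAAABBBBCCCCDDDDEEEEFFFF0000111122223333']
assert _sketch_match_key('0x22223333', _fps) == _fps
assert _sketch_match_key('ffff0000111122223333', _fps) == _fps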
class OpenPGPMimeSigningWrapper(MimeSigningWrapper):
CONTAINER_PARAMS = (('micalg', 'pgp-sha512'),
('protocol', 'application/pgp-signature'))
SIGNATURE_TYPE = 'application/pgp-signature'
SIGNATURE_DESC = 'OpenPGP Digital Signature'
def crypto(self):
return GnuPG(self.config, event=self.event)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeEncryptingWrapper(MimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
# FIXME: Define _encrypt, allow throw_keyids
def crypto(self):
return GnuPG(self.config, event=self.event)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeSignEncryptWrapper(OpenPGPMimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
def crypto(self):
return GnuPG(self.config)
def _encrypt(self, message_text, tokeys=None, armor=False):
from_key = self.get_keys([self.sender])[0]
# FIXME: Allow throw_keyids here.
return self.crypto().encrypt(message_text,
tokeys=tokeys, armor=True,
sign=True, fromkey=from_key)
def _update_crypto_status(self, part):
part.signature_info.part_status = 'verified'
part.encryption_info.part_status = 'decrypted'
class GnuPGExpectScript(threading.Thread):
STARTUP = 'Startup'
START_GPG = 'Start GPG'
FINISHED = 'Finished'
SCRIPT = []
VARIABLES = {}
DESCRIPTION = 'GnuPG Expect Script'
RUNNING_STATES = [STARTUP, START_GPG]
DEFAULT_TIMEOUT = 60 # Infinite wait isn't desirable
def __init__(self, gnupg,
sps=None, event=None, variables={}, on_complete=None):
threading.Thread.__init__(self)
self.daemon = True
self._lock = threading.RLock()
self.before = ''
with self._lock:
self.state = self.STARTUP
self.gnupg = gnupg
self.event = event
self.variables = variables or self.VARIABLES
# Store (name, callback) pairs, matching what on_complete() appends
# and what run() unpacks.
self._on_complete = [('on_complete', on_complete)] if on_complete else []
self.main_script = self.SCRIPT[:]
self.sps = sps
if sps:
self.variables['passphrase'] = '!!<SPS'
def __str__(self):
return '%s: %s' % (threading.Thread.__str__(self), self.state)
running = property(lambda self: (self.state in self.RUNNING_STATES))
failed = property(lambda self: False)
def in_state(self, state):
pass
def set_state(self, state):
self.state = state
self.in_state(state)
def sendline(self, proc, line):
if line == '!!<SPS':
reader = self.sps.get_reader()
while True:
c = reader.read()
if c != '':
proc.stdin.write(c)
else:
proc.stdin.write('\n')
break
else:
proc.stdin.write(line.encode('utf-8'))
proc.stdin.write('\n')
def _expecter(self, proc, exp, timebox):
while timebox[0] > 0:
self.before += proc.stdout.read(1)
if exp in self.before:
self.before = self.before.split(exp)[0]
return True
return False
def expect_exact(self, proc, exp, timeout=None):
from mailpile.util import RunTimed, TimedOut
timeout = timeout if (timeout and timeout > 0) else self.DEFAULT_TIMEOUT
timebox = [timeout]
self.before = ''
try:
self.gnupg.debug('Expect: %s' % exp)
if RunTimed(timeout, self._expecter, proc, exp, timebox):
return True
else:
raise TimedOut()
except TimedOut:
timebox[0] = 0
self.gnupg.debug('Timed out')
print 'Boo! %s not found in %s' % (exp, self.before)
raise
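# A hedged, self-contained sketch of the expect pattern used above:
# accumulate the child's stdout one byte at a time until the expected
# marker appears, giving up after a deadline. RunTimed/TimedOut are
# Mailpile utilities; plain wall-clock checks stand in for them here.
import time as _time

def _sketch_expect(read_byte, marker, timeout=5.0):
    """read_byte() returns one byte (or '') per call, like
    proc.stdout.read(1); returns the text seen before the marker."""
    deadline = _time.time() + timeout
    seen = ''
    while _time.time() < deadline:
        c = read_byte()
        if not c:
            break  # EOF from the child process
        seen += c
        if marker in seen:
            return seen.split(marker)[0]
    raise IOError('timed out waiting for %r' % marker)

# Feeding it a canned stream:
_stream = iter('GET_LINE keygen.algo\n')
assert _sketch_expect(lambda: next(_stream, ''), 'keygen.algo') == 'GET_LINE '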
def run_script(self, proc, script):
for exp, rpl, tmo, state in script:
self.expect_exact(proc, exp, timeout=tmo)
if rpl:
self.sendline(proc, (rpl % self.variables).strip())
if state:
self.set_state(state)
def gpg_args(self):
return ['--no-use-agent', '--list-keys']
def run(self):
try:
self.set_state(self.START_GPG)
gpg = self.gnupg
gpg.event.running_gpg(_(self.DESCRIPTION) % self.variables)
gpg.chat(self.gpg_args(), self.run_script, self.main_script)
self.set_state(self.FINISHED)
except:
import traceback
traceback.print_exc()
finally:
with self._lock:
if self.state != self.FINISHED:
self.state = 'Failed: ' + self.state
for name, callback in self._on_complete:
callback()
self._on_complete = None
def on_complete(self, name, callback):
with self._lock:
if self._on_complete is not None:
if name not in [o[0] for o in self._on_complete]:
self._on_complete.append((name, callback))
else:
callback()
class GnuPGBaseKeyGenerator(GnuPGExpectScript):
"""This is a background thread which generates a new PGP key."""
AWAITING_LOCK = 'Pending keygen'
KEY_SETUP = 'Key Setup'
GATHER_ENTROPY = 'Creating key'
CREATED_KEY = 'Created key'
HAVE_KEY = 'Have Key'
VARIABLES = {
'keytype': '1',
'bits': '2048',
'name': 'Mailpile Generated Key',
'email': '',
'comment': 'www.mailpile.is',
'passphrase': 'mailpile'}
DESCRIPTION = _('Creating a %(bits)s bit GnuPG key')
RUNNING_STATES = (GnuPGExpectScript.RUNNING_STATES +
[AWAITING_LOCK, KEY_SETUP, GATHER_ENTROPY, HAVE_KEY])
failed = property(lambda self: (not self.running and
not self.generated_key))
def __init__(self, *args, **kwargs):
super(GnuPGBaseKeyGenerator, self).__init__(*args, **kwargs)
self.generated_key = None
def in_state(self, state):
if state == self.HAVE_KEY:
self.generated_key = self.before.strip().split()[-1]
def run(self):
# In order to minimize risk of timeout during key generation (due to
# lack of entropy), we serialize them here using a global lock
self.set_state(self.AWAITING_LOCK)
self.event.message = (_('Waiting to generate a %s bit GnuPG key.')
                      % self.variables['bits'])
with ENTROPY_LOCK:
self.event.data['keygen_gotlock'] = 1
self.event.message = (_('Generating new %s bit PGP key.')
                      % self.variables['bits'])
super(GnuPGBaseKeyGenerator, self).run()
class GnuPG14KeyGenerator(GnuPGBaseKeyGenerator):
"""This is the GnuPG 1.4x specific PGP key generation script."""
B = GnuPGBaseKeyGenerator
# FIXME: If GnuPG starts asking for things in a different order,
# we'll needlessly fail. To address this, we need to make
# the expect logic smarter. For now, we just assume the GnuPG
# team will be hesitant to change things.
SCRIPT = [
('GET_LINE keygen.algo', '%(keytype)s', -1, B.KEY_SETUP),
('GET_LINE keygen.size', '%(bits)s', -1, None),
('GET_LINE keygen.valid', '0', -1, None),
('GET_LINE keygen.name', '%(name)s', -1, None),
('GET_LINE keygen.email', '%(email)s', -1, None),
('GET_LINE keygen.comment', '%(comment)s', -1, None),
('GET_HIDDEN passphrase', '%(passphrase)s', -1, None),
('GOT_IT', None, -1, B.GATHER_ENTROPY),
('KEY_CREATED', None, 7200, B.CREATED_KEY),
('\n', None, -1, B.HAVE_KEY)]
def gpg_args(self):
return ['--no-use-agent', '--allow-freeform-uid', '--gen-key']
class GnuPG21KeyGenerator(GnuPG14KeyGenerator):
"""This is the GnuPG 2.1.x specific PGP key generation script."""
# Note: We don't use the nice --quick-generate-key function, because
# it won't let us generate a usable key with custom parameters in
# a single pass. So using the existing expect logic turns out to
# be less work in practice. Oh well.
def gpg_args(self):
# --yes should keep GnuPG from complaining if there already exists
# a key with this UID.
return ['--yes', '--allow-freeform-uid', '--full-gen-key']
class GnuPGDummyKeyGenerator(GnuPGBaseKeyGenerator):
"""A dummy key generator class, for incompatible versions of GnuPG."""
DESCRIPTION = _('Unable to create a %(bits)s bit key, wrong GnuPG version')
def __init__(self, *args, **kwargs):
GnuPGBaseKeyGenerator.__init__(self, *args, **kwargs)
self.generated_key = False
def run(self):
with self._lock:
self.gnupg.event.running_gpg(_(self.DESCRIPTION) % self.variables)
self.set_state(self.FINISHED)
for name, callback in self._on_complete:
callback()
self._on_complete = None
def GnuPGKeyGenerator(gnupg, **kwargs):
"""Return an instanciated generator, depending on GnuPG version."""
version = gnupg.version_tuple()
if version < (1, 5):
return GnuPG14KeyGenerator(gnupg, **kwargs)
elif version >= (2, 1):
return GnuPG21KeyGenerator(gnupg, **kwargs)
else:
return GnuPGDummyKeyGenerator(gnupg, **kwargs)
# Reset our translation variable
_ = gettext
## Include the SKS keyserver certificate here ##
KEYSERVER_CERTIFICATE = """
-----BEGIN CERTIFICATE-----
MIIFizCCA3OgAwIBAgIJAK9zyLTPn4CPMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV
BAYTAk5PMQ0wCwYDVQQIDARPc2xvMR4wHAYDVQQKDBVza3Mta2V5c2VydmVycy5u
ZXQgQ0ExHjAcBgNVBAMMFXNrcy1rZXlzZXJ2ZXJzLm5ldCBDQTAeFw0xMjEwMDkw
MDMzMzdaFw0yMjEwMDcwMDMzMzdaMFwxCzAJBgNVBAYTAk5PMQ0wCwYDVQQIDARP
c2xvMR4wHAYDVQQKDBVza3Mta2V5c2VydmVycy5uZXQgQ0ExHjAcBgNVBAMMFXNr
cy1rZXlzZXJ2ZXJzLm5ldCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
ggIBANdsWy4PXWNUCkS3L//nrd0GqN3dVwoBGZ6w94Tw2jPDPifegwxQozFXkG6I
6A4TK1CJLXPvfz0UP0aBYyPmTNadDinaB9T4jIwd4rnxl+59GiEmqkN3IfPsv5Jj
MkKUmJnvOT0DEVlEaO1UZIwx5WpfprB3mR81/qm4XkAgmYrmgnLXd/pJDAMk7y1F
45b5zWofiD5l677lplcIPRbFhpJ6kDTODXh/XEdtF71EAeaOdEGOvyGDmCO0GWqS
FDkMMPTlieLA/0rgFTcz4xwUYj/cD5e0ZBuSkYsYFAU3hd1cGfBue0cPZaQH2HYx
Qk4zXD8S3F4690fRhr+tki5gyG6JDR67aKp3BIGLqm7f45WkX1hYp+YXywmEziM4
aSbGYhx8hoFGfq9UcfPEvp2aoc8u5sdqjDslhyUzM1v3m3ZGbhwEOnVjljY6JJLx
MxagxnZZSAY424ZZ3t71E/Mn27dm2w+xFRuoy8JEjv1d+BT3eChM5KaNwrj0IO/y
u8kFIgWYA1vZ/15qMT+tyJTfyrNVV/7Df7TNeWyNqjJ5rBmt0M6NpHG7CrUSkBy9
p8JhimgjP5r0FlEkgg+lyD+V79H98gQfVgP3pbJICz0SpBQf2F/2tyS4rLm+49rP
fcOajiXEuyhpcmzgusAj/1FjrtlynH1r9mnNaX4e+rLWzvU5AgMBAAGjUDBOMB0G
A1UdDgQWBBTkwyoJFGfYTVISTpM8E+igjdq28zAfBgNVHSMEGDAWgBTkwyoJFGfY
TVISTpM8E+igjdq28zAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQAR
OXnYwu3g1ZjHyley3fZI5aLPsaE17cOImVTehC8DcIphm2HOMR/hYTTL+V0G4P+u
gH+6xeRLKSHMHZTtSBIa6GDL03434y9CBuwGvAFCMU2GV8w92/Z7apkAhdLToZA/
X/iWP2jeaVJhxgEcH8uPrnSlqoPBcKC9PrgUzQYfSZJkLmB+3jEa3HKruy1abJP5
gAdQvwvcPpvYRnIzUc9fZODsVmlHVFBCl2dlu/iHh2h4GmL4Da2rRkUMlbVTdioB
UYIvMycdOkpH5wJftzw7cpjsudGas0PARDXCFfGyKhwBRFY7Xp7lbjtU5Rz0Gc04
lPrhDf0pFE98Aw4jJRpFeWMjpXUEaG1cq7D641RpgcMfPFvOHY47rvDTS7XJOaUT
BwRjmDt896s6vMDcaG/uXJbQjuzmmx3W2Idyh3s5SI0GTHb0IwMKYb4eBUIpQOnB
cE77VnCYqKvN1NVYAqhWjXbY7XasZvszCRcOG+W3FqNaHOK/n/0ueb0uijdLan+U
f4p1bjbAox8eAOQS/8a3bzkJzdyBNUKGx1BIK2IBL9bn/HravSDOiNRSnZ/R3l9G
ZauX0tu7IIDlRCILXSyeazu0aj/vdT3YFQXPcvt5Fkf5wiNTo53f72/jYEJd6qph
WrpoKqrwGwTpRUCMhYIUt65hsTxCiJJ5nKe39h46sg==
-----END CERTIFICATE-----
"""
| ./CrossVul/dataset_final_sorted/CWE-310/py/bad_546_0 |
crossvul-python_data_bad_3699_0 | """Encryption module that uses pycryptopp or pycrypto"""
try:
# Pycryptopp is preferred over Crypto because Crypto has had
# various periods of not being maintained, and pycryptopp uses
# the Crypto++ library which is generally considered the 'gold standard'
# of crypto implementations
from pycryptopp.cipher import aes
def aesEncrypt(data, key):
cipher = aes.AES(key)
return cipher.process(data)
# magic.
aesDecrypt = aesEncrypt
except ImportError:
from Crypto.Cipher import AES
def aesEncrypt(data, key):
cipher = AES.new(key)
data = data + (" " * (16 - (len(data) % 16)))
return cipher.encrypt(data)
def aesDecrypt(data, key):
cipher = AES.new(key)
return cipher.decrypt(data).rstrip()
def getKeyLength():
return 32
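# --- Hedged demonstration (not part of the original module) ---
# PyCrypto's AES.new(key) defaults to ECB mode, so the fallback above
# encrypts equal 16-byte plaintext blocks to equal ciphertext blocks,
# leaking structure; that is the CWE-310 weakness this 'bad' record
# illustrates. Requires the legacy PyCrypto package to run (PyCryptodome
# makes the mode argument mandatory and would reject AES.new(key)):
def _sketch_show_ecb_leak():
    from Crypto.Cipher import AES
    cipher = AES.new(b'k' * 32)  # implicit MODE_ECB
    ct = cipher.encrypt(b'A' * 16 + b'A' * 16)
    assert ct[:16] == ct[16:]  # identical input blocks, identical output
    return ct
# The space padding and rstrip() above also silently corrupt any plaintext
# that legitimately ends in whitespace.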
| ./CrossVul/dataset_final_sorted/CWE-310/py/bad_3699_0 |
crossvul-python_data_bad_546_1 | import datetime
import re
import time
import traceback
import urllib2
from email import encoders
from email.mime.base import MIMEBase
import mailpile.security as security
from mailpile.conn_brokers import Master as ConnBroker
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.commands import Command
from mailpile.crypto.gpgi import GnuPG
from mailpile.crypto.gpgi import OpenPGPMimeSigningWrapper
from mailpile.crypto.gpgi import OpenPGPMimeEncryptingWrapper
from mailpile.crypto.gpgi import OpenPGPMimeSignEncryptWrapper
from mailpile.crypto.mime import UnwrapMimeCrypto, MessageAsString
from mailpile.crypto.mime import OBSCURE_HEADERS_MILD, OBSCURE_HEADERS_EXTREME
from mailpile.crypto.mime import ObscureSubject
from mailpile.crypto.state import EncryptionInfo, SignatureInfo
from mailpile.eventlog import GetThreadEvent
from mailpile.mailutils.addresses import AddressHeaderParser
from mailpile.mailutils.emails import Email, MakeContentID, ClearParseCache
from mailpile.plugins import PluginManager, EmailTransform
from mailpile.plugins.vcard_gnupg import PGPKeysImportAsVCards
from mailpile.plugins.search import Search
_plugins = PluginManager(builtin=__file__)
##[ GnuPG e-mail processing ]#################################################
class ContentTxf(EmailTransform):
def _wrap_key_in_html(self, title, keydata):
return ((
"<html><head><meta charset='utf-8'></head><body>\n"
"<h1>%(title)s</h1><p>\n\n%(description)s\n\n</p>"
"<pre>\n%(key)s\n</pre><hr>"
"<i><a href='%(ad_url)s'>%(ad)s</a>.</i></body></html>"
) % self._wrap_key_in_html_vars(title, keydata)).encode('utf-8')
def _wrap_key_in_html_vars(self, title, keydata):
return {
"title": title,
"description": _(
"This is a digital encryption key, which you can use to send\n"
"confidential messages to the owner, or to verify their\n"
"digital signatures. You can safely discard or ignore this\n"
"file if you do not use e-mail encryption or signatures."),
"ad": _("Generated by Mailpile and GnuPG"),
"ad_url": "https://www.mailpile.is/", # FIXME: Link to help?
"key": keydata}
def TransformOutgoing(self, sender, rcpts, msg, **kwargs):
matched = False
gnupg = None
sender_keyid = None
# Prefer to just get everything from the profile VCard, in the
# common case...
profile = self._get_sender_profile(sender, kwargs)
if profile['vcard'] is not None:
sender_keyid = profile['vcard'].pgp_key
crypto_format = profile.get('crypto_format') or 'none'
# Parse the openpgp_header data from the crypto_format
openpgp_header = [p.split(':')[-1]
for p in crypto_format.split('+')
if p.startswith('openpgp_header:')]
if not openpgp_header:
openpgp_header = self.config.prefs.openpgp_header and ['CFG']
if openpgp_header and openpgp_header[0] != 'N' and not sender_keyid:
# This is a fallback: this shouldn't happen much in normal use
try:
gnupg = gnupg or GnuPG(self.config, event=GetThreadEvent())
seckeys = dict([(uid["email"], fp) for fp, key
in gnupg.list_secret_keys().iteritems()
if key["capabilities_map"].get("encrypt")
and key["capabilities_map"].get("sign")
for uid in key["uids"]])
sender_keyid = seckeys.get(sender)
except (KeyError, TypeError, IndexError, ValueError):
traceback.print_exc()
if sender_keyid and openpgp_header:
preference = {
'ES': 'signencrypt',
'SE': 'signencrypt',
'E': 'encrypt',
'S': 'sign',
'N': 'unprotected',
'CFG': self.config.prefs.openpgp_header
}[openpgp_header[0].upper()]
msg["OpenPGP"] = ("id=%s; preference=%s"
% (sender_keyid, preference))
if ('attach-pgp-pubkey' in msg and
msg['attach-pgp-pubkey'][:3].lower() in ('yes', 'tru')):
gnupg = gnupg or GnuPG(self.config, event=GetThreadEvent())
if sender_keyid:
keys = gnupg.list_keys(selectors=[sender_keyid])
else:
keys = gnupg.address_to_keys(AddressHeaderParser(sender).addresses_list()[0])
key_count = 0
for fp, key in keys.iteritems():
if not any(key["capabilities_map"].values()):
continue
# We should never really hit this more than once. But if we
# do, should still be fine.
keyid = key["keyid"]
data = gnupg.get_pubkey(keyid)
try:
from_name = key["uids"][0]["name"]
filename = _('Encryption key for %s') % from_name
except:
filename = _('My encryption key')
if self.config.prefs.gpg_html_wrap:
data = self._wrap_key_in_html(filename, data)
ext = 'html'
else:
ext = 'asc'
att = MIMEBase('application', 'pgp-keys')
att.set_payload(data)
encoders.encode_base64(att)
del att['MIME-Version']
att.add_header('Content-Id', MakeContentID())
att.add_header('Content-Disposition', 'attachment',
filename=filename + '.' + ext)
att.signature_info = SignatureInfo(parent=msg.signature_info)
att.encryption_info = EncryptionInfo(parent=msg.encryption_info)
msg.attach(att)
key_count += 1
if key_count > 0:
msg['x-mp-internal-pubkeys-attached'] = "Yes"
return sender, rcpts, msg, matched, True
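# A hedged sketch of the header value the block above produces; the
# letter-to-preference mapping repeats the dict above, the key id is
# invented:
def _sketch_openpgp_header(keyid, letters):
    pref = {'ES': 'signencrypt', 'SE': 'signencrypt', 'E': 'encrypt',
            'S': 'sign', 'N': 'unprotected'}[letters.upper()]
    return 'id=%s; preference=%s' % (keyid, pref)

assert (_sketch_openpgp_header('B906EA4B8A28151C', 'ES')
        == 'id=B906EA4B8A28151C; preference=signencrypt')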
class CryptoTxf(EmailTransform):
def TransformOutgoing(self, sender, rcpts, msg,
crypto_policy='none',
crypto_format='default',
cleaner=lambda m: m,
**kwargs):
matched = False
if 'pgp' in crypto_policy or 'gpg' in crypto_policy:
wrapper = None
# Set defaults
prefer_inline = kwargs.get('prefer_inline', False)
if 'obscure_all_meta' in crypto_format:
obscured = OBSCURE_HEADERS_EXTREME
elif 'obscure_meta' in crypto_format:
obscured = OBSCURE_HEADERS_MILD
elif self.config.prefs.encrypt_subject:
obscured = {'subject': ObscureSubject}
else:
obscured = {}
if 'sign' in crypto_policy and 'encrypt' in crypto_policy:
wrapper = OpenPGPMimeSignEncryptWrapper
prefer_inline = 'prefer_inline' in crypto_format
elif 'encrypt' in crypto_policy:
wrapper = OpenPGPMimeEncryptingWrapper
prefer_inline = 'prefer_inline' in crypto_format
elif 'sign' in crypto_policy:
# When signing only, we 1) prefer inline by default, based
# on this: https://github.com/mailpile/Mailpile/issues/1693
# and 2) don't obscure any headers as that's pointless.
wrapper = OpenPGPMimeSigningWrapper
prefer_inline = 'pgpmime' not in crypto_format
obscured = {}
if wrapper:
msg = wrapper(self.config,
sender=sender,
cleaner=cleaner,
recipients=rcpts,
use_html_wrapper=self.config.prefs.gpg_html_wrap,
obscured_headers=obscured
).wrap(msg, prefer_inline=prefer_inline)
matched = True
return sender, rcpts, msg, matched, (not matched)
_plugins.register_outgoing_email_content_transform('500_gnupg', ContentTxf)
_plugins.register_outgoing_email_crypto_transform('500_gnupg', CryptoTxf)
##[ Misc. GPG-related API commands ]##########################################
class GPGKeySearch(Command):
"""Search for a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/searchkey', 'crypto/gpg/searchkey', '<terms>')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'q': 'search terms'}
class CommandResult(Command.CommandResult):
def as_text(self):
if self.result:
return '\n'.join(["%s: %s <%s>" % (keyid, x["name"], x["email"]) for keyid, det in self.result.iteritems() for x in det["uids"]])
else:
return _("No results")
def command(self):
args = list(self.args)
for q in self.data.get('q', []):
args.extend(q.split())
return self._gnupg().search_key(" ".join(args))
class GPGKeyReceive(Command):
"""Fetch a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/receivekey', 'crypto/gpg/receivekey', '<keyid>')
HTTP_CALLABLE = ('POST', )
HTTP_QUERY_VARS = {'keyid': 'ID of key to fetch'}
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
def command(self):
keyid = self.data.get("keyid", self.args)
res = []
for key in keyid:
res.append(self._gnupg().recv_key(key))
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
return res
class GPGKeyImport(Command):
"""Import a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/importkey', 'crypto/gpg/importkey',
'<key_file>')
HTTP_CALLABLE = ('POST', )
HTTP_QUERY_VARS = {
'key_data': 'ASCII armor of public key to be imported',
'key_file': 'Location of file containing the public key',
'key_url': 'URL of file containing the public key',
'name': '(ignored)'
}
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
def command(self):
key_files = self.data.get("key_file", []) + [a for a in self.args
if not '://' in a]
key_urls = self.data.get("key_url", []) + [a for a in self.args
if '://' in a]
key_data = []
key_data.extend(self.data.get("key_data", []))
for key_file in key_files:
with open(key_file) as file:
key_data.append(file.read())
for key_url in key_urls:
with ConnBroker.context(need=[ConnBroker.OUTGOING_HTTP]):
uo = urllib2.urlopen(key_url)
key_data.append(uo.read())
rv = self._gnupg().import_keys('\n'.join(key_data))
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
# Update the VCards!
PGPKeysImportAsVCards(self.session,
arg=([i['fingerprint'] for i in rv['updated']] +
[i['fingerprint'] for i in rv['imported']])
).run()
return self._success(_("Imported %d keys") % len(key_data), rv)
class GPGKeySign(Command):
"""Sign a key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/signkey', 'crypto/gpg/signkey', '<keyid> [<signingkey>]')
HTTP_CALLABLE = ('POST',)
HTTP_QUERY_VARS = {'keyid': 'The key to sign',
'signingkey': 'The key to sign with'}
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
def command(self):
signingkey = None
keyid = None
args = list(self.args)
try: keyid = args.pop(0)
except: keyid = self.data.get("keyid", None)
try: signingkey = args.pop(0)
except: signingkey = self.data.get("signingkey", None)
if not keyid:
return self._error("You must supply a keyid", None)
rv = self._gnupg().sign_key(keyid, signingkey)
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
return rv
class GPGKeyImportFromMail(Search):
"""Import a GPG Key."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/importkeyfrommail',
'crypto/gpg/importkeyfrommail', '<mid>')
HTTP_CALLABLE = ('POST', )
HTTP_QUERY_VARS = {'mid': 'Message ID', 'att': 'Attachment ID'}
COMMAND_CACHE_TTL = 0
COMMAND_SECURITY = security.CC_CHANGE_GNUPG
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
def as_text(self):
if self.result:
return "Imported %d keys (%d updated, %d unchanged) from the mail" % (
self.result["results"]["count"],
self.result["results"]["imported"],
self.result["results"]["unchanged"])
return ""
def command(self):
session, config, idx = self.session, self.session.config, self._idx()
args = list(self.args)
if args and args[-1][0] == "#":
attid = args.pop()
else:
attid = self.data.get("att", 'application/pgp-keys')
args.extend(["=%s" % x for x in self.data.get("mid", [])])
eids = self._choose_messages(args)
if len(eids) < 1:
return self._error("No messages selected", None)
elif len(eids) > 1:
return self._error("One message at a time, please", None)
email = Email(idx, list(eids)[0])
fn, attr = email.extract_attachment(session, attid, mode='inline')
if attr and attr["data"]:
res = self._gnupg().import_keys(attr["data"])
# Previous crypto evaluations may now be out of date, so we
# clear the cache so users can see results right away.
ClearParseCache(pgpmime=True)
return self._success("Imported key", res)
return self._error("No results found", None)
class GPGKeyList(Command):
"""List GPG Keys."""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/keylist',
'crypto/gpg/keylist', '<address>')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'address': 'E-mail address'}
def command(self):
args = list(self.args)
if len(args) > 0:
addr = args[0]
else:
addr = self.data.get("address", None)
if addr is None:
return self._error("Must supply e-mail address", None)
res = self._gnupg().address_to_keys(addr)
return self._success("Searched for keys for e-mail address", res)
class GPGKeyListSecret(Command):
"""List Secret GPG Keys"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/keylist/secret',
'crypto/gpg/keylist/secret', '<address>')
HTTP_CALLABLE = ('GET', )
def command(self):
res = self._gnupg().list_secret_keys()
return self._success("Searched for secret keys", res)
class GPGUsageStatistics(Search):
"""Get usage statistics from mail, given an address"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/statistics',
'crypto/gpg/statistics', '<address>')
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {'address': 'E-mail address'}
COMMAND_CACHE_TTL = 0
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
def as_text(self):
if self.result:
return "%d%% of e-mail from %s has PGP signatures (%d/%d)" % (
100*self.result["ratio"],
self.result["address"],
self.result["pgpsigned"],
self.result["messages"])
return ""
def command(self):
args = list(self.args)
if len(args) > 0:
addr = args[0]
else:
addr = self.data.get("address", None)
if addr is None:
return self._error("Must supply an address", None)
session, idx = self._do_search(search=["from:%s" % addr])
total = 0
for messageid in session.results:
total += 1
session, idx = self._do_search(search=["from:%s" % addr, "has:pgp"])
pgp = 0
for messageid in session.results:
pgp += 1
if total > 0:
ratio = float(pgp)/total
else:
ratio = 0
res = {"messages": total,
"pgpsigned": pgp,
"ratio": ratio,
"address": addr}
return self._success("Got statistics for address", res)
class GPGCheckKeys(Search):
"""Sanity check your keys and profiles"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/gpg/check_keys', 'crypto/gpg/check_keys',
'[--all-keys]')
HTTP_CALLABLE = ('GET', )
COMMAND_CACHE_TTL = 0
MIN_KEYSIZE = 2048
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
def as_text(self):
if not isinstance(self.result, (dict,)):
return ''
if self.result.get('details'):
message = '%s.\n - %s' % (self.message, '\n - '.join(
p['description'] for p in self.result['details']
))
else:
message = '%s. %s' % (self.message, _('Looks good!'))
if self.result.get('fixes'):
message += '\n\n%s\n - %s' % (_('Proposed fixes:'),
'\n - '.join(
'\n * '.join(f) for f in self.result['fixes']
))
return message
def _fix_gen_key(self, min_bits=2048):
return [
_("You need a new key!"),
_("Run: %s") % '`gpg --gen-key`',
_("Answer the tool\'s questions: use RSA and RSA, %d bits or more"
) % min_bits]
def _fix_mp_config(self, good_key=None):
fprint = (good_key['fingerprint'] if good_key else '<FINGERPRINT>')
return [
_('Update the Mailpile config to use a good key:'),
_('IMPORTANT: This MUST be done before disabling the key!'),
_('Run: %s') % ('`set prefs.gpg_recipient = %s`' % fprint),
_('Run: %s') % ('`optimize`'),
_('This key\'s passphrase will be used to log in to Mailpile')]
def _fix_revoke_key(self, fprint, comment=''):
return [
_('Revoke bad keys:') + (' ' + comment if comment else ''),
_('Run: %s') % ('`gpg --gen-revoke %s`' % fprint),
_('Say yes to the first question, then follow the instructions'),
_('A revocation certificate will be shown on screen'),
_('Copy & paste that, save, and send to people who have the old key'),
_('You can search for %s to find such people'
) % '`is:encrypted to:me`']
def _fix_disable_key(self, fprint, comment=''):
return [
_('Disable bad keys:') + (' ' + comment if comment else ''),
_('Run: %s') % ('`gpg --edit-key %s`' % fprint),
_('Type %s') % '`disable`',
_('Type %s') % '`save`']
def command(self):
session, config = self.session, self.session.config
args = list(self.args)
all_keys = '--all-keys' in args
quiet = '--quiet' in args
date = datetime.date.today()
today = date.strftime("%Y-%m-%d")
date += datetime.timedelta(days=14)
fortnight = date.strftime("%Y-%m-%d")
serious = 0
details = []
fixes = []
bad_keys = {}
good_key = None
good_keys = {}
secret_keys = self._gnupg().list_secret_keys()
for fprint, info in secret_keys.iteritems():
k_info = {
'description': None,
'key': fprint,
'keysize': int(info.get('keysize', 0)),
}
is_serious = True
exp = info.get('expiration_date')
if info["disabled"]:
k_info['description'] = _('%s: --- Disabled.') % fprint
is_serious = False
elif (not info['capabilities_map'].get('encrypt') or
not info['capabilities_map'].get('sign')):
if info.get("revoked"):
k_info['description'] = _('%s: --- Revoked.'
) % fprint
is_serious = False
elif exp and exp <= today:
k_info['description'] = _('%s: Bad: Expired on %s'
) % (fprint,
info['expiration_date'])
else:
k_info['description'] = _('%s: Bad: Key is useless'
) % fprint
elif exp and exp <= fortnight:
k_info['description'] = _('%s: Bad: Expires on %s'
) % (fprint, info['expiration_date'])
elif k_info['keysize'] < self.MIN_KEYSIZE:
k_info['description'] = _('%s: Bad: Too small (%d bits)'
) % (fprint, k_info['keysize'])
else:
good_keys[fprint] = info
if (not good_key
or int(good_key['keysize']) < k_info['keysize']):
good_key = info
k_info['description'] = _('%s: OK: %d bits, looks good!'
) % (fprint, k_info['keysize'])
is_serious = False
if k_info['description'] is not None:
details.append(k_info)
if is_serious:
fixes += [self._fix_revoke_key(fprint, _('(optional)')),
self._fix_disable_key(fprint)]
serious += 1
if fprint not in good_keys:
bad_keys[fprint] = info
bad_recipient = False
if config.prefs.gpg_recipient:
for k in bad_keys:
if k.endswith(config.prefs.gpg_recipient):
details.append({
'gpg_recipient': True,
'description': _('%s: Mailpile config uses bad key'
) % k,
'key': k
})
bad_recipient = True
serious += 1
if bad_recipient and good_key:
fixes[:0] = [self._fix_mp_config(good_key)]
profiles = config.vcards.find_vcards([], kinds=['profile'])
for vc in profiles:
p_info = {
'profile': vc.get('x-mailpile-rid').value,
'email': vc.email,
'fn': vc.fn
}
try:
if all_keys:
vcls = [k.value for k in vc.get_all('key') if k.value]
else:
vcls = [vc.get('key').value]
except (IndexError, AttributeError):
vcls = []
for key in vcls:
fprint = key.split(',')[-1]
if fprint and fprint in bad_keys:
p_info['key'] = fprint
p_info['description'] = _('%(key)s: Bad key in profile'
' %(fn)s <%(email)s>'
' (%(profile)s)') % p_info
details.append(p_info)
serious += 1
if not vcls:
p_info['description'] = _('No key for %(fn)s <%(email)s>'
' (%(profile)s)') % p_info
details.append(p_info)
serious += 1
if len(good_keys) == 0:
fixes[:0] = [self._fix_gen_key(min_bits=self.MIN_KEYSIZE),
self._fix_mp_config()]
if quiet and not serious:
return self._success('OK')
ret = self._error if serious else self._success
return ret(_('Sanity checked: %d keys in GPG keyring, %d profiles')
% (len(secret_keys), len(profiles)),
result={'passed': not serious,
'details': details,
'fixes': fixes})
_plugins.register_commands(GPGKeySearch)
_plugins.register_commands(GPGKeyReceive)
_plugins.register_commands(GPGKeyImport)
_plugins.register_commands(GPGKeyImportFromMail)
_plugins.register_commands(GPGKeySign)
_plugins.register_commands(GPGKeyList)
_plugins.register_commands(GPGUsageStatistics)
_plugins.register_commands(GPGKeyListSecret)
_plugins.register_commands(GPGCheckKeys)
| ./CrossVul/dataset_final_sorted/CWE-310/py/bad_546_1 |
crossvul-python_data_good_3699_0 | """Encryption module that uses pycryptopp or pycrypto"""
try:
# Pycryptopp is preferred over Crypto because Crypto has had
# various periods of not being maintained, and pycryptopp uses
# the Crypto++ library which is generally considered the 'gold standard'
# of crypto implementations
from pycryptopp.cipher import aes
def aesEncrypt(data, key):
cipher = aes.AES(key)
return cipher.process(data)
# magic.
aesDecrypt = aesEncrypt
except ImportError:
from Crypto.Cipher import AES
from Crypto.Util import Counter
def aesEncrypt(data, key):
cipher = AES.new(key, AES.MODE_CTR,
counter=Counter.new(128, initial_value=0))
return cipher.encrypt(data)
def aesDecrypt(data, key):
cipher = AES.new(key, AES.MODE_CTR,
counter=Counter.new(128, initial_value=0))
return cipher.decrypt(data)
def getKeyLength():
return 32
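# --- Hedged usage sketch (not part of the original module) ---
# CTR mode turns AES into a keystream XOR, so no padding or rstrip() is
# needed and ciphertext length equals plaintext length. Both functions
# rebuild the counter at initial_value=0, so this scheme assumes each key
# encrypts at most one message: reusing a key would reuse the keystream.
def _sketch_ctr_roundtrip():
    key = b'k' * getKeyLength()
    ct = aesEncrypt(b'attack at dawn', key)
    assert len(ct) == len(b'attack at dawn')
    assert aesDecrypt(ct, key) == b'attack at dawn'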
| ./CrossVul/dataset_final_sorted/CWE-310/py/good_3699_0 |
crossvul-python_data_bad_3658_0 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-310/py/bad_3658_0 |
crossvul-python_data_bad_4295_1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_csr
short_description: Generate OpenSSL Certificate Signing Request (CSR)
description:
- This module allows one to (re)generate OpenSSL certificate signing requests.
- It uses the pyOpenSSL python library to interact with openssl. This module supports
the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple
extensions.
- "Please note that the module regenerates existing CSR if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing CSR, consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.3
- Or pyOpenSSL >= 0.15
author:
- Yanis Guenane (@Spredzy)
options:
state:
description:
- Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
digest:
description:
- The digest used when signing the certificate signing request with the private key.
type: str
default: sha256
privatekey_path:
description:
- The path to the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
version_added: "1.0.0"
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
version:
description:
- The version of the certificate signing request.
- "The only allowed value according to L(RFC 2986,https://tools.ietf.org/html/rfc2986#section-4.1)
is 1."
- This option will no longer accept unsupported values from Ansible 2.14 on.
type: int
default: 1
force:
description:
- Should the certificate signing request be forced regenerated by this ansible module.
type: bool
default: no
path:
description:
- The name of the file into which the generated OpenSSL certificate signing request will be written.
type: path
required: true
subject:
description:
- Key/value pairs that will be present in the subject name field of the certificate signing request.
- If you need to specify more than one value with the same key, use a list as value.
type: dict
country_name:
description:
- The countryName field of the certificate signing request subject.
type: str
aliases: [ C, countryName ]
state_or_province_name:
description:
- The stateOrProvinceName field of the certificate signing request subject.
type: str
aliases: [ ST, stateOrProvinceName ]
locality_name:
description:
- The localityName field of the certificate signing request subject.
type: str
aliases: [ L, localityName ]
organization_name:
description:
- The organizationName field of the certificate signing request subject.
type: str
aliases: [ O, organizationName ]
organizational_unit_name:
description:
- The organizationalUnitName field of the certificate signing request subject.
type: str
aliases: [ OU, organizationalUnitName ]
common_name:
description:
- The commonName field of the certificate signing request subject.
type: str
aliases: [ CN, commonName ]
email_address:
description:
- The emailAddress field of the certificate signing request subject.
type: str
aliases: [ E, emailAddress ]
subject_alt_name:
description:
- SAN extension to attach to the certificate signing request.
- This can either be a 'comma separated string' or a YAML list.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
- Note that if no SAN is specified, but a common name, the common
name will be added as a SAN except if C(useCommonNameForSAN) is
set to I(false).
- More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_critical:
description:
- Should the subjectAltName extension be considered as critical.
type: bool
aliases: [ subjectAltName_critical ]
use_common_name_for_san:
description:
- If set to C(yes), the module will fill the common name in for
C(subject_alt_name) with C(DNS:) prefix if no SAN is specified.
type: bool
default: yes
aliases: [ useCommonNameForSAN ]
key_usage:
description:
- This defines the purpose (e.g. encipherment, signature, certificate signing)
of the key contained in the certificate.
type: list
elements: str
aliases: [ keyUsage ]
key_usage_critical:
description:
- Should the keyUsage extension be considered as critical.
type: bool
aliases: [ keyUsage_critical ]
extended_key_usage:
description:
- Additional restrictions (e.g. client authentication, server authentication)
on the allowed purposes for which the public key may be used.
type: list
elements: str
aliases: [ extKeyUsage, extendedKeyUsage ]
extended_key_usage_critical:
description:
- Should the extkeyUsage extension be considered as critical.
type: bool
aliases: [ extKeyUsage_critical, extendedKeyUsage_critical ]
basic_constraints:
description:
- Indicates basic constraints, such as if the certificate is a CA.
type: list
elements: str
aliases: [ basicConstraints ]
basic_constraints_critical:
description:
- Should the basicConstraints extension be considered as critical.
type: bool
aliases: [ basicConstraints_critical ]
ocsp_must_staple:
description:
- Indicates that the certificate should contain the OCSP Must Staple
extension (U(https://tools.ietf.org/html/rfc7633)).
type: bool
aliases: [ ocspMustStaple ]
ocsp_must_staple_critical:
description:
- Should the OCSP Must Staple extension be considered as critical.
- Note that according to the RFC, this extension should not be marked
as critical, as old clients not knowing about OCSP Must Staple
are required to reject such certificates
(see U(https://tools.ietf.org/html/rfc7633#section-4)).
type: bool
aliases: [ ocspMustStaple_critical ]
name_constraints_permitted:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is allowed to issue certificates for.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_excluded:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is *not* allowed to issue certificates for.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_critical:
description:
- Should the Name Constraints extension be considered as critical.
type: bool
version_added: 1.1.0
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
CSR back if you overwrote it with a new one by accident.
type: bool
default: no
create_subject_key_identifier:
description:
- Create the Subject Key Identifier from the public key.
- "Please note that commercial CAs can ignore the value, respectively use a value of
their own choice instead. Specifying this option is mostly useful for self-signed
certificates or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: no
subject_key_identifier:
description:
- The subject key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this option can only be used if I(create_subject_key_identifier) is C(no).
- Note that this is only supported if the C(cryptography) backend is used!
type: str
authority_key_identifier:
description:
- The authority key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- If specified, I(authority_cert_issuer) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: str
authority_cert_issuer:
description:
- Names that will be present in the authority cert issuer field of the certificate signing request.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA)
- "Example: C(DNS:ca.example.org)"
- If specified, I(authority_key_identifier) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: list
elements: str
authority_cert_serial_number:
description:
- The authority cert serial number.
- Note that this is only supported if the C(cryptography) backend is used!
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: int
return_content:
description:
- If set to C(yes), will return the (current or generated) CSR's content as I(csr).
type: bool
default: no
version_added: "1.0.0"
extends_documentation_fragment:
- files
notes:
- If the certificate signing request already exists it will be checked whether subjectAltName,
keyUsage, extendedKeyUsage and basicConstraints only contain the requested values, whether
OCSP Must Staple is as requested, and if the request was signed by the given private key.
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with an inline key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_content: "{{ private_key_content }}"
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with a passphrase protected private key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with Subject information
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
country_name: FR
organization_name: Ansible
email_address: jdoe@ansible.com
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with subjectAltName extension
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com'
- name: Generate an OpenSSL CSR with subjectAltName extension with dynamic list
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: "{{ item.value | map('regex_replace', '^', 'DNS:') | list }}"
with_dict:
dns_server:
- www.ansible.com
- m.ansible.com
- name: Force regenerate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with special key usages
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
key_usage:
- digitalSignature
- keyAgreement
extended_key_usage:
- clientAuth
- name: Generate an OpenSSL Certificate Signing Request with OCSP Must Staple
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
ocsp_must_staple: yes
- name: Generate an OpenSSL Certificate Signing Request for WinRM Certificate authentication
community.crypto.openssl_csr:
path: /etc/ssl/csr/winrm.auth.csr
privatekey_path: /etc/ssl/private/winrm.auth.pem
common_name: username
extended_key_usage:
- clientAuth
subject_alt_name: otherName:1.3.6.1.4.1.311.20.2.3;UTF8:username@localhost
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the CSR was generated for
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
filename:
description: Path to the generated Certificate Signing Request
returned: changed or success
type: str
sample: /etc/ssl/csr/www.ansible.com.csr
subject:
description: A list of the subject tuples attached to the CSR
returned: changed or success
type: list
elements: list
sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]"
subjectAltName:
description: The alternative names this CSR is valid for
returned: changed or success
type: list
elements: str
sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ]
keyUsage:
description: Purpose for which the public key may be used
returned: changed or success
type: list
elements: str
sample: [ 'digitalSignature', 'keyAgreement' ]
extendedKeyUsage:
description: Additional restriction on the public key purposes
returned: changed or success
type: list
elements: str
sample: [ 'clientAuth' ]
basicConstraints:
description: Indicates if the certificate belongs to a CA
returned: changed or success
type: list
elements: str
sample: ['CA:TRUE', 'pathLenConstraint:0']
ocsp_must_staple:
description: Indicates whether the certificate has the OCSP
Must Staple feature enabled
returned: changed or success
type: bool
sample: false
name_constraints_permitted:
description: List of permitted subtrees to sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.somedomain.com']
version_added: 1.1.0
name_constraints_excluded:
description: List of excluded subtrees the CA cannot sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.com']
version_added: 1.1.0
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.csr.2019-03-09@11:22~
csr:
description: The (current or generated) CSR's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: "1.0.0"
'''
import abc
import binascii
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate_request,
parse_name_field,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_basic_constraints,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
pyopenssl_parse_name_constraints,
)
MINIMAL_PYOPENSSL_VERSION = '0.15'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.3'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# OpenSSL 1.1.0 or newer
OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
OPENSSL_MUST_STAPLE_VALUE = b"status_request"
else:
# OpenSSL 1.0.x or older
OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.x509
import cryptography.x509.oid
import cryptography.exceptions
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.hashes
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
CRYPTOGRAPHY_MUST_STAPLE_NAME = cryptography.x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
CRYPTOGRAPHY_MUST_STAPLE_VALUE = b"\x30\x03\x02\x01\x05"
class CertificateSigningRequestError(OpenSSLObjectError):
pass
class CertificateSigningRequestBase(OpenSSLObject):
def __init__(self, module):
super(CertificateSigningRequestBase, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.digest = module.params['digest']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.version = module.params['version']
self.subjectAltName = module.params['subject_alt_name']
self.subjectAltName_critical = module.params['subject_alt_name_critical']
self.keyUsage = module.params['key_usage']
self.keyUsage_critical = module.params['key_usage_critical']
self.extendedKeyUsage = module.params['extended_key_usage']
self.extendedKeyUsage_critical = module.params['extended_key_usage_critical']
self.basicConstraints = module.params['basic_constraints']
self.basicConstraints_critical = module.params['basic_constraints_critical']
self.ocspMustStaple = module.params['ocsp_must_staple']
self.ocspMustStaple_critical = module.params['ocsp_must_staple_critical']
self.name_constraints_permitted = module.params['name_constraints_permitted'] or []
self.name_constraints_excluded = module.params['name_constraints_excluded'] or []
self.name_constraints_critical = module.params['name_constraints_critical']
self.create_subject_key_identifier = module.params['create_subject_key_identifier']
self.subject_key_identifier = module.params['subject_key_identifier']
self.authority_key_identifier = module.params['authority_key_identifier']
self.authority_cert_issuer = module.params['authority_cert_issuer']
self.authority_cert_serial_number = module.params['authority_cert_serial_number']
self.request = None
self.privatekey = None
self.csr_bytes = None
self.return_content = module.params['return_content']
if self.create_subject_key_identifier and self.subject_key_identifier is not None:
module.fail_json(msg='subject_key_identifier cannot be specified if create_subject_key_identifier is true')
self.backup = module.params['backup']
self.backup_file = None
self.subject = [
('C', module.params['country_name']),
('ST', module.params['state_or_province_name']),
('L', module.params['locality_name']),
('O', module.params['organization_name']),
('OU', module.params['organizational_unit_name']),
('CN', module.params['common_name']),
('emailAddress', module.params['email_address']),
]
if module.params['subject']:
self.subject = self.subject + parse_name_field(module.params['subject'])
self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]]
self.using_common_name_for_san = False
if not self.subjectAltName and module.params['use_common_name_for_san']:
for sub in self.subject:
if sub[0] in ('commonName', 'CN'):
self.subjectAltName = ['DNS:%s' % sub[1]]
self.using_common_name_for_san = True
break
if self.subject_key_identifier is not None:
try:
self.subject_key_identifier = binascii.unhexlify(self.subject_key_identifier.replace(':', ''))
except Exception as e:
raise CertificateSigningRequestError('Cannot parse subject_key_identifier: {0}'.format(e))
if self.authority_key_identifier is not None:
try:
self.authority_key_identifier = binascii.unhexlify(self.authority_key_identifier.replace(':', ''))
except Exception as e:
raise CertificateSigningRequestError('Cannot parse authority_key_identifier: {0}'.format(e))
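        # Both identifiers accept colon-separated hex; for example, '00:11:22'
        # becomes b'\x00\x11\x22' after the replace()/unhexlify() round trip.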
@abc.abstractmethod
def _generate_csr(self):
pass
def generate(self, module):
'''Generate the certificate signing request.'''
if not self.check(module, perms_required=False) or self.force:
result = self._generate_csr()
if self.backup:
self.backup_file = module.backup_local(self.path)
if self.return_content:
self.csr_bytes = result
write_file(module, result)
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
@abc.abstractmethod
def _load_private_key(self):
pass
@abc.abstractmethod
def _check_csr(self):
pass
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CertificateSigningRequestBase, self).check(module, perms_required)
self._load_private_key()
if not state_and_perms:
return False
return self._check_csr()
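    # Idempotence in a nutshell: check() succeeds only when the file exists
    # (and, if perms_required, has the requested permissions) and the on-disk
    # CSR matches every requested field; generate() rewrites the CSR only
    # when that check fails or force is set.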
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(CertificateSigningRequestBase, self).remove(module)
def dump(self):
'''Serialize the object into a dictionary.'''
result = {
'privatekey': self.privatekey_path,
'filename': self.path,
'subject': self.subject,
'subjectAltName': self.subjectAltName,
'keyUsage': self.keyUsage,
'extendedKeyUsage': self.extendedKeyUsage,
'basicConstraints': self.basicConstraints,
'ocspMustStaple': self.ocspMustStaple,
'changed': self.changed,
'name_constraints_permitted': self.name_constraints_permitted,
'name_constraints_excluded': self.name_constraints_excluded,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
if self.csr_bytes is None:
self.csr_bytes = load_file_if_exists(self.path, ignore_errors=True)
result['csr'] = self.csr_bytes.decode('utf-8') if self.csr_bytes else None
return result
class CertificateSigningRequestPyOpenSSL(CertificateSigningRequestBase):
def __init__(self, module):
if module.params['create_subject_key_identifier']:
module.fail_json(msg='You cannot use create_subject_key_identifier with the pyOpenSSL backend!')
for o in ('subject_key_identifier', 'authority_key_identifier', 'authority_cert_issuer', 'authority_cert_serial_number'):
if module.params[o] is not None:
module.fail_json(msg='You cannot use {0} with the pyOpenSSL backend!'.format(o))
super(CertificateSigningRequestPyOpenSSL, self).__init__(module)
def _generate_csr(self):
req = crypto.X509Req()
req.set_version(self.version - 1)
subject = req.get_subject()
for entry in self.subject:
if entry[1] is not None:
# Workaround for https://github.com/pyca/pyopenssl/issues/165
nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0]))
if nid == 0:
raise CertificateSigningRequestError('Unknown subject field identifier "{0}"'.format(entry[0]))
res = OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0)
if res == 0:
raise CertificateSigningRequestError('Invalid value for subject field identifier "{0}": {1}'.format(entry[0], entry[1]))
extensions = []
if self.subjectAltName:
altnames = ', '.join(self.subjectAltName)
try:
extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii')))
except OpenSSL.crypto.Error as e:
raise CertificateSigningRequestError(
'Error while parsing Subject Alternative Names {0} (check for missing type prefix, such as "DNS:"!): {1}'.format(
', '.join(["{0}".format(san) for san in self.subjectAltName]), str(e)
)
)
if self.keyUsage:
usages = ', '.join(self.keyUsage)
extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii')))
if self.extendedKeyUsage:
usages = ', '.join(self.extendedKeyUsage)
extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii')))
if self.basicConstraints:
usages = ', '.join(self.basicConstraints)
extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii')))
if self.name_constraints_permitted or self.name_constraints_excluded:
usages = ', '.join(
['permitted;{0}'.format(name) for name in self.name_constraints_permitted] +
['excluded;{0}'.format(name) for name in self.name_constraints_excluded]
)
extensions.append(crypto.X509Extension(b"nameConstraints", self.name_constraints_critical, usages.encode('ascii')))
if self.ocspMustStaple:
extensions.append(crypto.X509Extension(OPENSSL_MUST_STAPLE_NAME, self.ocspMustStaple_critical, OPENSSL_MUST_STAPLE_VALUE))
if extensions:
req.add_extensions(extensions)
req.set_pubkey(self.privatekey)
req.sign(self.privatekey, self.digest)
self.request = req
return crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request)
def _load_private_key(self):
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase
)
except OpenSSLBadPassphraseError as exc:
raise CertificateSigningRequestError(exc)
def _check_csr(self):
def _check_subject(csr):
subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject]
current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()]
if not set(subject) == set(current_subject):
return False
return True
def _check_subjectAltName(extensions):
altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '')
altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
to_text(altnames_ext, errors='surrogate_or_strict').split(',') if altname.strip()]
if self.subjectAltName:
if (set(altnames) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.subjectAltName]) or
altnames_ext.get_critical() != self.subjectAltName_critical):
return False
else:
if altnames:
return False
return True
def _check_keyUsage_(extensions, extName, expected, critical):
usages_ext = [ext for ext in extensions if ext.get_short_name() == extName]
if (not usages_ext and expected) or (usages_ext and not expected):
return False
elif not usages_ext and not expected:
return True
else:
current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')]
expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected]
return set(current) == set(expected) and usages_ext[0].get_critical() == critical
def _check_keyUsage(extensions):
usages_ext = [ext for ext in extensions if ext.get_short_name() == b'keyUsage']
if (not usages_ext and self.keyUsage) or (usages_ext and not self.keyUsage):
return False
elif not usages_ext and not self.keyUsage:
return True
else:
# OpenSSL._util.lib.OBJ_txt2nid() always returns 0 for all keyUsage values
# (since keyUsage has a fixed bitfield for these values and is not extensible).
# Therefore, we create an extension for the wanted values, and compare the
# data of the extensions (which is the serialized bitfield).
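                # For example, a keyUsage of just 'digitalSignature' serializes
                # to the DER BIT STRING 03:02:07:80 (bit 0 set, 7 unused bits).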
expected_ext = crypto.X509Extension(b"keyUsage", False, ', '.join(self.keyUsage).encode('ascii'))
return usages_ext[0].get_data() == expected_ext.get_data() and usages_ext[0].get_critical() == self.keyUsage_critical
        def _check_extendedKeyUsage(extensions):
return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical)
def _check_basicConstraints(extensions):
return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical)
def _check_nameConstraints(extensions):
nc_ext = next((ext for ext in extensions if ext.get_short_name() == b'nameConstraints'), '')
permitted, excluded = pyopenssl_parse_name_constraints(nc_ext)
if self.name_constraints_permitted or self.name_constraints_excluded:
if set(permitted) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_permitted]):
return False
if set(excluded) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_excluded]):
return False
if nc_ext.get_critical() != self.name_constraints_critical:
return False
else:
if permitted or excluded:
return False
return True
def _check_ocspMustStaple(extensions):
oms_ext = [ext for ext in extensions if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE]
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
# Older versions of libssl don't know about OCSP Must Staple
oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
if self.ocspMustStaple:
return len(oms_ext) > 0 and oms_ext[0].get_critical() == self.ocspMustStaple_critical
else:
return len(oms_ext) == 0
def _check_extensions(csr):
extensions = csr.get_extensions()
return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions) and
_check_ocspMustStaple(extensions) and _check_nameConstraints(extensions))
def _check_signature(csr):
try:
return csr.verify(self.privatekey)
except crypto.Error:
return False
try:
csr = load_certificate_request(self.path, backend='pyopenssl')
except Exception as dummy:
return False
return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
class CertificateSigningRequestCryptography(CertificateSigningRequestBase):
def __init__(self, module):
super(CertificateSigningRequestCryptography, self).__init__(module)
self.cryptography_backend = cryptography.hazmat.backends.default_backend()
self.module = module
if self.version != 1:
module.warn('The cryptography backend only supports version 1. (The only valid value according to RFC 2986.)')
def _generate_csr(self):
csr = cryptography.x509.CertificateSigningRequestBuilder()
try:
csr = csr.subject_name(cryptography.x509.Name([
cryptography.x509.NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1])) for entry in self.subject
]))
except ValueError as e:
raise CertificateSigningRequestError(e)
if self.subjectAltName:
csr = csr.add_extension(cryptography.x509.SubjectAlternativeName([
cryptography_get_name(name) for name in self.subjectAltName
]), critical=self.subjectAltName_critical)
if self.keyUsage:
params = cryptography_parse_key_usage_params(self.keyUsage)
csr = csr.add_extension(cryptography.x509.KeyUsage(**params), critical=self.keyUsage_critical)
if self.extendedKeyUsage:
usages = [cryptography_name_to_oid(usage) for usage in self.extendedKeyUsage]
csr = csr.add_extension(cryptography.x509.ExtendedKeyUsage(usages), critical=self.extendedKeyUsage_critical)
if self.basicConstraints:
params = {}
ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
csr = csr.add_extension(cryptography.x509.BasicConstraints(ca, path_length), critical=self.basicConstraints_critical)
if self.ocspMustStaple:
try:
# This only works with cryptography >= 2.1
csr = csr.add_extension(cryptography.x509.TLSFeature([cryptography.x509.TLSFeatureType.status_request]), critical=self.ocspMustStaple_critical)
except AttributeError as dummy:
csr = csr.add_extension(
cryptography.x509.UnrecognizedExtension(CRYPTOGRAPHY_MUST_STAPLE_NAME, CRYPTOGRAPHY_MUST_STAPLE_VALUE),
critical=self.ocspMustStaple_critical
)
if self.name_constraints_permitted or self.name_constraints_excluded:
try:
csr = csr.add_extension(cryptography.x509.NameConstraints(
[cryptography_get_name(name) for name in self.name_constraints_permitted],
[cryptography_get_name(name) for name in self.name_constraints_excluded],
), critical=self.name_constraints_critical)
except TypeError as e:
raise OpenSSLObjectError('Error while parsing name constraint: {0}'.format(e))
if self.create_subject_key_identifier:
csr = csr.add_extension(
cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
critical=False
)
elif self.subject_key_identifier is not None:
csr = csr.add_extension(cryptography.x509.SubjectKeyIdentifier(self.subject_key_identifier), critical=False)
if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
issuers = None
if self.authority_cert_issuer is not None:
issuers = [cryptography_get_name(n) for n in self.authority_cert_issuer]
csr = csr.add_extension(
cryptography.x509.AuthorityKeyIdentifier(self.authority_key_identifier, issuers, self.authority_cert_serial_number),
critical=False
)
digest = None
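        # cryptography_key_needs_digest_for_signing() is expected to return
        # False for keys such as Ed25519/Ed448, whose signature schemes hash
        # internally; for those, digest deliberately stays None.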
if cryptography_key_needs_digest_for_signing(self.privatekey):
if self.digest == 'sha256':
digest = cryptography.hazmat.primitives.hashes.SHA256()
elif self.digest == 'sha384':
digest = cryptography.hazmat.primitives.hashes.SHA384()
elif self.digest == 'sha512':
digest = cryptography.hazmat.primitives.hashes.SHA512()
elif self.digest == 'sha1':
digest = cryptography.hazmat.primitives.hashes.SHA1()
elif self.digest == 'md5':
digest = cryptography.hazmat.primitives.hashes.MD5()
# FIXME
else:
raise CertificateSigningRequestError('Unsupported digest "{0}"'.format(self.digest))
try:
self.request = csr.sign(self.privatekey, digest, self.cryptography_backend)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and digest is None:
self.module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
except UnicodeError as e:
# This catches IDNAErrors, which happens when a bad name is passed as a SAN
# (https://github.com/ansible-collections/community.crypto/issues/105).
# For older cryptography versions, this is handled by idna, which raises
# an idna.core.IDNAError. Later versions of cryptography deprecated and stopped
            # requiring idna, so we cannot easily catch that exception type directly. Fortunately, in
# most versions of idna, IDNAError extends UnicodeError. There is only version
# 2.3 where it extends Exception instead (see
# https://github.com/kjd/idna/commit/ebefacd3134d0f5da4745878620a6a1cba86d130
# and then
# https://github.com/kjd/idna/commit/ea03c7b5db7d2a99af082e0239da2b68aeea702a).
msg = 'Error while creating CSR: {0}\n'.format(e)
if self.using_common_name_for_san:
self.module.fail_json(msg=msg + 'This is probably caused because the Common Name is used as a SAN.'
' Specifying use_common_name_for_san=false might fix this.')
self.module.fail_json(msg=msg + 'This is probably caused by an invalid Subject Alternative DNS Name.')
return self.request.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)
def _load_private_key(self):
try:
if self.privatekey_content is not None:
content = self.privatekey_content
else:
with open(self.privatekey_path, 'rb') as f:
content = f.read()
self.privatekey = cryptography.hazmat.primitives.serialization.load_pem_private_key(
content,
None if self.privatekey_passphrase is None else to_bytes(self.privatekey_passphrase),
backend=self.cryptography_backend
)
except Exception as e:
raise CertificateSigningRequestError(e)
def _check_csr(self):
def _check_subject(csr):
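            # Comparing as sets makes the subject check insensitive to the
            # ordering of the name attributes.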
subject = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.subject]
current_subject = [(sub.oid, sub.value) for sub in csr.subject]
return set(subject) == set(current_subject)
def _find_extension(extensions, exttype):
return next(
(ext for ext in extensions if isinstance(ext.value, exttype)),
None
)
def _check_subjectAltName(extensions):
current_altnames_ext = _find_extension(extensions, cryptography.x509.SubjectAlternativeName)
current_altnames = [str(altname) for altname in current_altnames_ext.value] if current_altnames_ext else []
altnames = [str(cryptography_get_name(altname)) for altname in self.subjectAltName] if self.subjectAltName else []
if set(altnames) != set(current_altnames):
return False
if altnames:
if current_altnames_ext.critical != self.subjectAltName_critical:
return False
return True
def _check_keyUsage(extensions):
current_keyusage_ext = _find_extension(extensions, cryptography.x509.KeyUsage)
if not self.keyUsage:
return current_keyusage_ext is None
elif current_keyusage_ext is None:
return False
params = cryptography_parse_key_usage_params(self.keyUsage)
for param in params:
if getattr(current_keyusage_ext.value, '_' + param) != params[param]:
return False
if current_keyusage_ext.critical != self.keyUsage_critical:
return False
return True
        def _check_extendedKeyUsage(extensions):
current_usages_ext = _find_extension(extensions, cryptography.x509.ExtendedKeyUsage)
current_usages = [str(usage) for usage in current_usages_ext.value] if current_usages_ext else []
usages = [str(cryptography_name_to_oid(usage)) for usage in self.extendedKeyUsage] if self.extendedKeyUsage else []
if set(current_usages) != set(usages):
return False
if usages:
if current_usages_ext.critical != self.extendedKeyUsage_critical:
return False
return True
def _check_basicConstraints(extensions):
bc_ext = _find_extension(extensions, cryptography.x509.BasicConstraints)
current_ca = bc_ext.value.ca if bc_ext else False
current_path_length = bc_ext.value.path_length if bc_ext else None
ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
# Check CA flag
if ca != current_ca:
return False
# Check path length
if path_length != current_path_length:
return False
# Check criticality
if self.basicConstraints:
if bc_ext.critical != self.basicConstraints_critical:
return False
return True
def _check_ocspMustStaple(extensions):
try:
# This only works with cryptography >= 2.1
tlsfeature_ext = _find_extension(extensions, cryptography.x509.TLSFeature)
has_tlsfeature = True
except AttributeError as dummy:
tlsfeature_ext = next(
(ext for ext in extensions if ext.value.oid == CRYPTOGRAPHY_MUST_STAPLE_NAME),
None
)
has_tlsfeature = False
if self.ocspMustStaple:
if not tlsfeature_ext or tlsfeature_ext.critical != self.ocspMustStaple_critical:
return False
if has_tlsfeature:
return cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
else:
return tlsfeature_ext.value.value == CRYPTOGRAPHY_MUST_STAPLE_VALUE
else:
return tlsfeature_ext is None
def _check_nameConstraints(extensions):
current_nc_ext = _find_extension(extensions, cryptography.x509.NameConstraints)
current_nc_perm = [str(altname) for altname in current_nc_ext.value.permitted_subtrees] if current_nc_ext else []
current_nc_excl = [str(altname) for altname in current_nc_ext.value.excluded_subtrees] if current_nc_ext else []
nc_perm = [str(cryptography_get_name(altname)) for altname in self.name_constraints_permitted]
nc_excl = [str(cryptography_get_name(altname)) for altname in self.name_constraints_excluded]
if set(nc_perm) != set(current_nc_perm) or set(nc_excl) != set(current_nc_excl):
return False
if nc_perm or nc_excl:
if current_nc_ext.critical != self.name_constraints_critical:
return False
return True
def _check_subject_key_identifier(extensions):
ext = _find_extension(extensions, cryptography.x509.SubjectKeyIdentifier)
if self.create_subject_key_identifier or self.subject_key_identifier is not None:
if not ext or ext.critical:
return False
if self.create_subject_key_identifier:
digest = cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()).digest
return ext.value.digest == digest
else:
return ext.value.digest == self.subject_key_identifier
else:
return ext is None
def _check_authority_key_identifier(extensions):
ext = _find_extension(extensions, cryptography.x509.AuthorityKeyIdentifier)
if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
if not ext or ext.critical:
return False
aci = None
csr_aci = None
if self.authority_cert_issuer is not None:
aci = [str(cryptography_get_name(n)) for n in self.authority_cert_issuer]
if ext.value.authority_cert_issuer is not None:
csr_aci = [str(n) for n in ext.value.authority_cert_issuer]
return (ext.value.key_identifier == self.authority_key_identifier
and csr_aci == aci
and ext.value.authority_cert_serial_number == self.authority_cert_serial_number)
else:
return ext is None
def _check_extensions(csr):
extensions = csr.extensions
return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions) and
_check_ocspMustStaple(extensions) and _check_subject_key_identifier(extensions) and
_check_authority_key_identifier(extensions) and _check_nameConstraints(extensions))
def _check_signature(csr):
if not csr.is_signature_valid:
return False
# To check whether public key of CSR belongs to private key,
# encode both public keys and compare PEMs.
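            # (Comparing the serialized form sidesteps the lack of a portable
            # equality operator on key objects across cryptography versions.)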
key_a = csr.public_key().public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.PEM,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
key_b = self.privatekey.public_key().public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.PEM,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
return key_a == key_b
try:
csr = load_certificate_request(self.path, backend='cryptography')
except Exception as dummy:
return False
return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
digest=dict(type='str', default='sha256'),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
privatekey_passphrase=dict(type='str', no_log=True),
version=dict(type='int', default=1),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
subject=dict(type='dict'),
country_name=dict(type='str', aliases=['C', 'countryName']),
state_or_province_name=dict(type='str', aliases=['ST', 'stateOrProvinceName']),
locality_name=dict(type='str', aliases=['L', 'localityName']),
organization_name=dict(type='str', aliases=['O', 'organizationName']),
organizational_unit_name=dict(type='str', aliases=['OU', 'organizationalUnitName']),
common_name=dict(type='str', aliases=['CN', 'commonName']),
email_address=dict(type='str', aliases=['E', 'emailAddress']),
subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName']),
subject_alt_name_critical=dict(type='bool', default=False, aliases=['subjectAltName_critical']),
use_common_name_for_san=dict(type='bool', default=True, aliases=['useCommonNameForSAN']),
key_usage=dict(type='list', elements='str', aliases=['keyUsage']),
key_usage_critical=dict(type='bool', default=False, aliases=['keyUsage_critical']),
extended_key_usage=dict(type='list', elements='str', aliases=['extKeyUsage', 'extendedKeyUsage']),
extended_key_usage_critical=dict(type='bool', default=False, aliases=['extKeyUsage_critical', 'extendedKeyUsage_critical']),
basic_constraints=dict(type='list', elements='str', aliases=['basicConstraints']),
basic_constraints_critical=dict(type='bool', default=False, aliases=['basicConstraints_critical']),
ocsp_must_staple=dict(type='bool', default=False, aliases=['ocspMustStaple']),
ocsp_must_staple_critical=dict(type='bool', default=False, aliases=['ocspMustStaple_critical']),
name_constraints_permitted=dict(type='list', elements='str'),
name_constraints_excluded=dict(type='list', elements='str'),
name_constraints_critical=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
create_subject_key_identifier=dict(type='bool', default=False),
subject_key_identifier=dict(type='str'),
authority_key_identifier=dict(type='str'),
authority_cert_issuer=dict(type='list', elements='str'),
authority_cert_serial_number=dict(type='int'),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
return_content=dict(type='bool', default=False),
),
required_together=[('authority_cert_issuer', 'authority_cert_serial_number')],
required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
add_file_common_args=True,
supports_check_mode=True,
)
if module.params['version'] != 1:
module.deprecate('The version option will only support allowed values from community.crypto 2.0.0 on. '
'Currently, only the value 1 is allowed by RFC 2986',
version='2.0.0', collection_name='community.crypto')
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
        module.fail_json(name=base_dir, msg='The directory %s does not exist or the path is not a directory' % base_dir)
backend = module.params['select_crypto_backend']
if backend == 'auto':
        # Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# First try cryptography, then pyOpenSSL
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
try:
getattr(crypto.X509Req, 'get_extensions')
except AttributeError:
module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
csr = CertificateSigningRequestPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
csr = CertificateSigningRequestCryptography(module)
if module.params['state'] == 'present':
if module.check_mode:
result = csr.dump()
result['changed'] = module.params['force'] or not csr.check(module)
module.exit_json(**result)
csr.generate(module)
else:
if module.check_mode:
result = csr.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
csr.remove(module)
result = csr.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/bad_4295_1 |
crossvul-python_data_bad_4295_4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_publickey
short_description: Generate an OpenSSL public key from its private key.
description:
- This module allows one to (re)generate OpenSSL public keys from their private keys.
- Keys are generated in PEM or OpenSSH format.
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. When I(format) is C(OpenSSH),
the C(cryptography) backend has to be used. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL >= 16.0.0
- Needs cryptography >= 1.4 if I(format) is C(OpenSSH)
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the public key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
force:
description:
            - Should the key be regenerated even if it already exists.
type: bool
default: no
format:
description:
- The format of the public key.
type: str
default: PEM
choices: [ OpenSSH, PEM ]
path:
description:
- Name of the file in which the generated TLS/SSL public key will be written.
type: path
required: true
privatekey_path:
description:
- Path to the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: path
privatekey_content:
description:
- The content of the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the private key.
type: str
backup:
description:
- Create a backup file including a timestamp so you can get the original
public key back if you overwrote it with a different one by accident.
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
return_content:
description:
- If set to C(yes), will return the (current or generated) public key's content as I(publickey).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL public key in PEM format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL public key in PEM format from an inline key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_content: "{{ private_key_content }}"
- name: Generate an OpenSSL public key in OpenSSH v2 format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
format: OpenSSH
- name: Generate an OpenSSL public key with a passphrase protected private key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
- name: Force regenerate an OpenSSL public key if it already exists
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Remove an OpenSSL public key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
state: absent
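# A minimal sketch of capturing the generated key via return_content; the
# registered variable name below is illustrative:
- name: Generate an OpenSSL public key and register its content
  community.crypto.openssl_publickey:
    path: /etc/ssl/public/ansible.com.pem
    privatekey_path: /etc/ssl/private/ansible.com.pem
    return_content: yes
  register: publickey_result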
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the public key was generated from.
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
format:
description: The format of the public key (PEM, OpenSSH, ...).
returned: changed or success
type: str
sample: PEM
filename:
description: Path to the generated TLS/SSL public key file.
returned: changed or success
type: str
sample: /etc/ssl/public/ansible.com.pem
fingerprint:
description:
        - The fingerprint of the public key. A fingerprint is generated for each hash algorithm available in Python's C(hashlib).
- Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/publickey.pem.2019-03-09@11:22~
publickey:
description: The (current or generated) public key's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
)
MINIMAL_PYOPENSSL_VERSION = '16.0.0'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH = '1.4'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class PublicKeyError(OpenSSLObjectError):
pass
class PublicKey(OpenSSLObject):
def __init__(self, module, backend):
super(PublicKey, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.format = module.params['format']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.privatekey = None
self.publickey_bytes = None
self.return_content = module.params['return_content']
self.fingerprint = {}
self.backend = backend
self.backup = module.params['backup']
self.backup_file = None
def _create_publickey(self, module):
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
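        # Serialize the public half of the loaded key: a single-line OpenSSH
        # string, or a PEM-encoded SubjectPublicKeyInfo structure.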
if self.backend == 'cryptography':
if self.format == 'OpenSSH':
return self.privatekey.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
else:
return self.privatekey.public_key().public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
)
else:
try:
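                # dump_publickey() only exists in pyOpenSSL >= 16.0.0; older
                # releases fall through to the AttributeError branch below.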
return crypto.dump_publickey(crypto.FILETYPE_PEM, self.privatekey)
except AttributeError as dummy:
raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys')
def generate(self, module):
"""Generate the public key."""
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise PublicKeyError(
'The private key %s does not exist' % self.privatekey_path
)
if not self.check(module, perms_required=False) or self.force:
try:
publickey_content = self._create_publickey(module)
if self.return_content:
self.publickey_bytes = publickey_content
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, publickey_content)
self.changed = True
except OpenSSLBadPassphraseError as exc:
raise PublicKeyError(exc)
except (IOError, OSError) as exc:
raise PublicKeyError(exc)
self.fingerprint = get_fingerprint(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend,
)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PublicKey, self).check(module, perms_required)
def _check_privatekey():
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
return False
try:
with open(self.path, 'rb') as public_key_fh:
publickey_content = public_key_fh.read()
if self.return_content:
self.publickey_bytes = publickey_content
if self.backend == 'cryptography':
if self.format == 'OpenSSH':
                        # Read and re-serialize the public key; this strips off any trailing comment.
current_publickey = crypto_serialization.load_ssh_public_key(publickey_content, backend=default_backend())
publickey_content = current_publickey.public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
else:
current_publickey = crypto_serialization.load_pem_public_key(publickey_content, backend=default_backend())
publickey_content = current_publickey.public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
)
else:
publickey_content = crypto.dump_publickey(
crypto.FILETYPE_PEM,
crypto.load_publickey(crypto.FILETYPE_PEM, publickey_content)
)
except Exception as dummy:
return False
try:
desired_publickey = self._create_publickey(module)
except OpenSSLBadPassphraseError as exc:
raise PublicKeyError(exc)
return publickey_content == desired_publickey
if not state_and_perms:
return state_and_perms
return _check_privatekey()
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(PublicKey, self).remove(module)
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'privatekey': self.privatekey_path,
'filename': self.path,
'format': self.format,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
if self.publickey_bytes is None:
self.publickey_bytes = load_file_if_exists(self.path, ignore_errors=True)
result['publickey'] = self.publickey_bytes.decode('utf-8') if self.publickey_bytes else None
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
format=dict(type='str', default='PEM', choices=['OpenSSH', 'PEM']),
privatekey_passphrase=dict(type='str', no_log=True),
backup=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
return_content=dict(type='bool', default=False),
),
supports_check_mode=True,
add_file_common_args=True,
required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
)
minimal_cryptography_version = MINIMAL_CRYPTOGRAPHY_VERSION
if module.params['format'] == 'OpenSSH':
minimal_cryptography_version = MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH
backend = module.params['select_crypto_backend']
if backend == 'auto':
        # Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(minimal_cryptography_version)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# Decision
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
if module.params['format'] == 'OpenSSH':
module.fail_json(
msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH)),
exception=CRYPTOGRAPHY_IMP_ERR
)
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
minimal_cryptography_version,
MINIMAL_PYOPENSSL_VERSION))
if module.params['format'] == 'OpenSSH' and backend != 'cryptography':
module.fail_json(msg="Format OpenSSH requires the cryptography backend.")
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(minimal_cryptography_version)),
exception=CRYPTOGRAPHY_IMP_ERR)
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg="The directory '%s' does not exist or the file is not a directory" % base_dir
)
try:
public_key = PublicKey(module, backend)
if public_key.state == 'present':
if module.check_mode:
result = public_key.dump()
result['changed'] = module.params['force'] or not public_key.check(module)
module.exit_json(**result)
public_key.generate(module)
else:
if module.check_mode:
result = public_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
public_key.remove(module)
result = public_key.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/bad_4295_4 |
crossvul-python_data_good_4295_4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_publickey
short_description: Generate an OpenSSL public key from its private key.
description:
- This module allows one to (re)generate OpenSSL public keys from their private keys.
- Keys are generated in PEM or OpenSSH format.
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. When I(format) is C(OpenSSH),
the C(cryptography) backend has to be used. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL >= 16.0.0
- Needs cryptography >= 1.4 if I(format) is C(OpenSSH)
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the public key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
force:
description:
            - Should the key be regenerated even if it already exists.
type: bool
default: no
format:
description:
- The format of the public key.
type: str
default: PEM
choices: [ OpenSSH, PEM ]
path:
description:
- Name of the file in which the generated TLS/SSL public key will be written.
type: path
required: true
privatekey_path:
description:
- Path to the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: path
privatekey_content:
description:
- The content of the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the private key.
type: str
backup:
description:
- Create a backup file including a timestamp so you can get the original
public key back if you overwrote it with a different one by accident.
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
return_content:
description:
- If set to C(yes), will return the (current or generated) public key's content as I(publickey).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL public key in PEM format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL public key in PEM format from an inline key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_content: "{{ private_key_content }}"
- name: Generate an OpenSSL public key in OpenSSH v2 format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
format: OpenSSH
- name: Generate an OpenSSL public key with a passphrase protected private key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
- name: Force regenerate an OpenSSL public key if it already exists
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Remove an OpenSSL public key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
state: absent
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the public key was generated from.
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
format:
description: The format of the public key (PEM, OpenSSH, ...).
returned: changed or success
type: str
sample: PEM
filename:
description: Path to the generated TLS/SSL public key file.
returned: changed or success
type: str
sample: /etc/ssl/public/ansible.com.pem
fingerprint:
description:
        - The fingerprint of the public key. A fingerprint is generated for each hash algorithm available in Python's C(hashlib).
- Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/publickey.pem.2019-03-09@11:22~
publickey:
description: The (current or generated) public key's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
)
MINIMAL_PYOPENSSL_VERSION = '16.0.0'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH = '1.4'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class PublicKeyError(OpenSSLObjectError):
pass
class PublicKey(OpenSSLObject):
def __init__(self, module, backend):
super(PublicKey, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.format = module.params['format']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.privatekey = None
self.publickey_bytes = None
self.return_content = module.params['return_content']
self.fingerprint = {}
self.backend = backend
self.backup = module.params['backup']
self.backup_file = None
def _create_publickey(self, module):
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
if self.backend == 'cryptography':
if self.format == 'OpenSSH':
return self.privatekey.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
else:
return self.privatekey.public_key().public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
)
else:
try:
return crypto.dump_publickey(crypto.FILETYPE_PEM, self.privatekey)
except AttributeError as dummy:
raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys')
def generate(self, module):
"""Generate the public key."""
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise PublicKeyError(
'The private key %s does not exist' % self.privatekey_path
)
if not self.check(module, perms_required=False) or self.force:
try:
publickey_content = self._create_publickey(module)
if self.return_content:
self.publickey_bytes = publickey_content
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, publickey_content)
self.changed = True
except OpenSSLBadPassphraseError as exc:
raise PublicKeyError(exc)
except (IOError, OSError) as exc:
raise PublicKeyError(exc)
self.fingerprint = get_fingerprint(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend,
)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PublicKey, self).check(module, perms_required)
def _check_privatekey():
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
return False
try:
with open(self.path, 'rb') as public_key_fh:
publickey_content = public_key_fh.read()
if self.return_content:
self.publickey_bytes = publickey_content
if self.backend == 'cryptography':
if self.format == 'OpenSSH':
                        # Read and re-serialize the public key; this strips off any trailing comment.
current_publickey = crypto_serialization.load_ssh_public_key(publickey_content, backend=default_backend())
publickey_content = current_publickey.public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
else:
current_publickey = crypto_serialization.load_pem_public_key(publickey_content, backend=default_backend())
publickey_content = current_publickey.public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
)
else:
publickey_content = crypto.dump_publickey(
crypto.FILETYPE_PEM,
crypto.load_publickey(crypto.FILETYPE_PEM, publickey_content)
)
except Exception as dummy:
return False
try:
desired_publickey = self._create_publickey(module)
except OpenSSLBadPassphraseError as exc:
raise PublicKeyError(exc)
return publickey_content == desired_publickey
if not state_and_perms:
return state_and_perms
return _check_privatekey()
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(PublicKey, self).remove(module)
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'privatekey': self.privatekey_path,
'filename': self.path,
'format': self.format,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
if self.publickey_bytes is None:
self.publickey_bytes = load_file_if_exists(self.path, ignore_errors=True)
result['publickey'] = self.publickey_bytes.decode('utf-8') if self.publickey_bytes else None
return result
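# Illustrative sketch (not used by the module): the idempotence check in
# PublicKey.check() above boils down to re-serializing both the on-disk key
# and the key freshly derived from the private key into one canonical encoding
# and comparing the raw bytes. With the cryptography library, that
# normalization step looks roughly like this:
def _demo_normalize_pem_public_key(pem_bytes):
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    # Parse whatever PEM variant was on disk ...
    public_key = serialization.load_pem_public_key(pem_bytes, backend=default_backend())
    # ... and re-emit it as PEM/SubjectPublicKeyInfo, so that two equal keys
    # always compare equal byte-for-byte.
    return public_key.public_bytes(
        serialization.Encoding.PEM,
        serialization.PublicFormat.SubjectPublicKeyInfo,
    )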
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str', no_log=True),
format=dict(type='str', default='PEM', choices=['OpenSSH', 'PEM']),
privatekey_passphrase=dict(type='str', no_log=True),
backup=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
return_content=dict(type='bool', default=False),
),
supports_check_mode=True,
add_file_common_args=True,
required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
)
minimal_cryptography_version = MINIMAL_CRYPTOGRAPHY_VERSION
if module.params['format'] == 'OpenSSH':
minimal_cryptography_version = MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(minimal_cryptography_version)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# Decision
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
if module.params['format'] == 'OpenSSH':
module.fail_json(
msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH)),
exception=CRYPTOGRAPHY_IMP_ERR
)
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
minimal_cryptography_version,
MINIMAL_PYOPENSSL_VERSION))
if module.params['format'] == 'OpenSSH' and backend != 'cryptography':
module.fail_json(msg="Format OpenSSH requires the cryptography backend.")
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(minimal_cryptography_version)),
exception=CRYPTOGRAPHY_IMP_ERR)
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg="The directory '%s' does not exist or the file is not a directory" % base_dir
)
try:
public_key = PublicKey(module, backend)
if public_key.state == 'present':
if module.check_mode:
result = public_key.dump()
result['changed'] = module.params['force'] or not public_key.check(module)
module.exit_json(**result)
public_key.generate(module)
else:
if module.check_mode:
result = public_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
public_key.remove(module)
result = public_key.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_4 |
crossvul-python_data_bad_4295_3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey_info
short_description: Provide information for OpenSSL private keys
description:
- This module allows one to query information on OpenSSL private keys.
- In case the key consistency checks fail, the module will fail as this indicates a faked
private key. In this case, all return variables are still returned. Note that key consistency
checks are not available all key types; if none is available, C(none) is returned for
C(key_is_consistent).
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
C(select_crypto_backend)). Please note that the PyOpenSSL backend was deprecated in Ansible 2.9
and will be removed in community.crypto 2.0.0.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.2.3
author:
- Felix Fontein (@felixfontein)
- Yanis Guenane (@Spredzy)
options:
path:
description:
- Remote absolute path where the private key file is loaded from.
type: path
content:
description:
- Content of the private key file.
- Either I(path) or I(content) must be specified, but not both.
type: str
version_added: '1.0.0'
passphrase:
description:
- The passphrase for the private key.
type: str
return_private_key_data:
description:
- Whether to return private key data.
- Only set this to C(yes) when you want private information about this key to
leave the remote machine.
- "WARNING: you have to make sure that private key data isn't accidentally logged!"
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
seealso:
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Get information on generated key
community.crypto.openssl_privatekey_info:
path: /etc/ssl/private/ansible.com.pem
register: result
- name: Dump information
debug:
var: result
'''
RETURN = r'''
can_load_key:
description: Whether the module was able to load the private key from disk
returned: always
type: bool
can_parse_key:
description: Whether the module was able to parse the private key
returned: always
type: bool
key_is_consistent:
description:
- Whether the key is consistent. Can also return C(none) in addition to C(yes) and
C(no), to indicate that consistency could not be checked.
- In case the check returns C(no), the module will fail.
returned: always
type: bool
public_key:
description: Private key's public key in PEM format
returned: success
type: str
sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
public_key_fingerprints:
description:
- Fingerprints of the private key's public key.
- For every hash algorithm available, the fingerprint is computed.
returned: success
type: dict
sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
type:
description:
- The key's type.
- One of C(RSA), C(DSA), C(ECC), C(Ed25519), C(X25519), C(Ed448), or C(X448).
- Will start with C(unknown) if the key type cannot be determined.
returned: success
type: str
sample: RSA
public_data:
description:
- Public key data. Depends on key type.
returned: success
type: dict
private_data:
description:
- Private key data. Depends on key type.
returned: success and when I(return_private_key_data) is set to C(yes)
type: dict
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.math import (
binary_exp_mod,
quick_is_not_prime,
)
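# Note on the two helpers imported above (contract assumed from their use
# below, since they live in the collection's module_utils): binary_exp_mod(b, e, m)
# computes b**e mod m by square-and-multiply, i.e. the same result as Python's
# built-in pow(b, e, m), and quick_is_not_prime(n) returns True only when n can
# cheaply be shown to be composite.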
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_PYOPENSSL_VERSION = '0.15'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography.hazmat.primitives import serialization
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
SIGNATURE_TEST_DATA = b'1234'
def _get_cryptography_key_info(key):
key_public_data = dict()
key_private_data = dict()
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
key_type = 'RSA'
key_public_data['size'] = key.key_size
key_public_data['modulus'] = key.public_key().public_numbers().n
key_public_data['exponent'] = key.public_key().public_numbers().e
key_private_data['p'] = key.private_numbers().p
key_private_data['q'] = key.private_numbers().q
key_private_data['exponent'] = key.private_numbers().d
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
key_type = 'DSA'
key_public_data['size'] = key.key_size
key_public_data['p'] = key.parameters().parameter_numbers().p
key_public_data['q'] = key.parameters().parameter_numbers().q
key_public_data['g'] = key.parameters().parameter_numbers().g
key_public_data['y'] = key.public_key().public_numbers().y
key_private_data['x'] = key.private_numbers().x
elif CRYPTOGRAPHY_HAS_X25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
key_type = 'X25519'
elif CRYPTOGRAPHY_HAS_X448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
key_type = 'X448'
elif CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
key_type = 'Ed25519'
elif CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
key_type = 'Ed448'
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
key_type = 'ECC'
key_public_data['curve'] = key.public_key().curve.name
key_public_data['x'] = key.public_key().public_numbers().x
key_public_data['y'] = key.public_key().public_numbers().y
key_public_data['exponent_size'] = key.public_key().curve.key_size
key_private_data['multiplier'] = key.private_numbers().private_value
else:
key_type = 'unknown ({0})'.format(type(key))
return key_type, key_public_data, key_private_data
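# Minimal sketch of the kind of data the helper above extracts (assumes the
# cryptography library is installed; recent releases no longer require an
# explicit backend argument for key generation): for an RSA key, the public
# numbers (n, e) and the private numbers (p, q, d) come straight from the
# *_numbers() accessors.
def _demo_rsa_key_numbers():
    from cryptography.hazmat.primitives.asymmetric import rsa
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    public = key.public_key().public_numbers()
    private = key.private_numbers()
    return {
        'size': key.key_size,
        'modulus': public.n,
        'exponent': public.e,
        'p': private.p,
        'q': private.q,
        'private_exponent': private.d,
    }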
def _check_dsa_consistency(key_public_data, key_private_data):
# Get parameters
p = key_public_data.get('p')
q = key_public_data.get('q')
g = key_public_data.get('g')
y = key_public_data.get('y')
x = key_private_data.get('x')
for v in (p, q, g, y, x):
if v is None:
return None
# Make sure that g is not 0, 1 or -1 in Z/pZ
if g < 2 or g >= p - 1:
return False
# Make sure that x is in range
if x < 1 or x >= q:
return False
# Check whether q divides p-1
if (p - 1) % q != 0:
return False
# Check that g**q mod p == 1
if binary_exp_mod(g, q, p) != 1:
return False
# Check whether g**x mod p == y
if binary_exp_mod(g, x, p) != y:
return False
# Check (quickly) whether p or q is not prime
if quick_is_not_prime(q) or quick_is_not_prime(p):
return False
return True
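# Worked toy example for the checks above (illustrative only; these parameters
# are far too small for real use): p=23 and q=11 are primes with q dividing
# p-1, g=4 generates the subgroup of order q (4**11 % 23 == 1), x=3 is a valid
# private exponent, and y = g**x % p = 18, so every check passes.
def _demo_dsa_consistency():
    toy_public = {'p': 23, 'q': 11, 'g': 4, 'y': pow(4, 3, 23)}
    toy_private = {'x': 3}
    return _check_dsa_consistency(toy_public, toy_private)  # True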
def _is_cryptography_key_consistent(key, key_public_data, key_private_data):
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
return bool(key._backend._lib.RSA_check_key(key._rsa_cdata))
if isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
result = _check_dsa_consistency(key_public_data, key_private_data)
if result is not None:
return result
try:
signature = key.sign(SIGNATURE_TEST_DATA, cryptography.hazmat.primitives.hashes.SHA256())
except AttributeError:
# sign() was added in cryptography 1.5, but we support older versions
return None
try:
key.public_key().verify(
signature,
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.hashes.SHA256()
)
return True
except cryptography.exceptions.InvalidSignature:
return False
if isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
try:
signature = key.sign(
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
)
except AttributeError:
# sign() was added in cryptography 1.5, but we support older versions
return None
try:
key.public_key().verify(
signature,
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
)
return True
except cryptography.exceptions.InvalidSignature:
return False
has_simple_sign_function = False
if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
has_simple_sign_function = True
if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
has_simple_sign_function = True
if has_simple_sign_function:
signature = key.sign(SIGNATURE_TEST_DATA)
try:
key.public_key().verify(signature, SIGNATURE_TEST_DATA)
return True
except cryptography.exceptions.InvalidSignature:
return False
# For X25519 and X448, there's no test yet.
return None
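# Standalone sketch of the sign/verify probe used above, assuming the
# cryptography library is new enough to provide Ed25519 (>= 2.6): a key pair
# is consistent exactly when a signature made with the private half verifies
# under the public half; verify() raises InvalidSignature otherwise.
def _demo_sign_verify_probe():
    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
    key = Ed25519PrivateKey.generate()
    signature = key.sign(SIGNATURE_TEST_DATA)
    try:
        key.public_key().verify(signature, SIGNATURE_TEST_DATA)
        return True
    except InvalidSignature:
        return False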
class PrivateKeyInfo(OpenSSLObject):
def __init__(self, module, backend):
super(PrivateKeyInfo, self).__init__(
module.params['path'] or '',
'present',
False,
module.check_mode,
)
self.backend = backend
self.module = module
self.content = module.params['content']
self.passphrase = module.params['passphrase']
self.return_private_key_data = module.params['return_private_key_data']
def generate(self):
# Empty method because OpenSSLObject wants this
pass
def dump(self):
# Empty method because OpenSSLObject wants this
pass
@abc.abstractmethod
def _get_public_key(self, binary):
pass
@abc.abstractmethod
def _get_key_info(self):
pass
@abc.abstractmethod
def _is_key_consistent(self, key_public_data, key_private_data):
pass
def get_info(self):
result = dict(
can_load_key=False,
can_parse_key=False,
key_is_consistent=None,
)
if self.content is not None:
priv_key_detail = self.content.encode('utf-8')
result['can_load_key'] = True
else:
try:
with open(self.path, 'rb') as b_priv_key_fh:
priv_key_detail = b_priv_key_fh.read()
result['can_load_key'] = True
except (IOError, OSError) as exc:
self.module.fail_json(msg=to_native(exc), **result)
try:
self.key = load_privatekey(
path=None,
content=priv_key_detail,
passphrase=to_bytes(self.passphrase) if self.passphrase is not None else self.passphrase,
backend=self.backend
)
result['can_parse_key'] = True
except OpenSSLObjectError as exc:
self.module.fail_json(msg=to_native(exc), **result)
result['public_key'] = self._get_public_key(binary=False)
pk = self._get_public_key(binary=True)
result['public_key_fingerprints'] = get_fingerprint_of_bytes(pk) if pk is not None else dict()
key_type, key_public_data, key_private_data = self._get_key_info()
result['type'] = key_type
result['public_data'] = key_public_data
if self.return_private_key_data:
result['private_data'] = key_private_data
result['key_is_consistent'] = self._is_key_consistent(key_public_data, key_private_data)
if result['key_is_consistent'] is False:
# Only fail when the check returned exactly False; None means "we don't know" and must not cause a failure.
self.module.fail_json(
msg="Private key is not consistent! (See "
"https://blog.hboeck.de/archives/888-How-I-tricked-Symantec-with-a-Fake-Private-Key.html)",
**result
)
return result
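# Conceptual sketch of the fingerprint computation (an assumption: this mirrors
# what get_fingerprint_of_bytes from the collection's module_utils produces):
# one colon-separated hex digest per hash algorithm over the DER-encoded public
# key, matching the public_key_fingerprints sample in RETURN.
def _demo_fingerprints(public_key_der):
    import hashlib
    fingerprints = {}
    for name in ('md5', 'sha1', 'sha256', 'sha512'):
        digest = hashlib.new(name, public_key_der).hexdigest()
        fingerprints[name] = ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
    return fingerprints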
class PrivateKeyInfoCryptography(PrivateKeyInfo):
"""Validate the supplied private key, using the cryptography backend"""
def __init__(self, module):
super(PrivateKeyInfoCryptography, self).__init__(module, 'cryptography')
def _get_public_key(self, binary):
return self.key.public_key().public_bytes(
serialization.Encoding.DER if binary else serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def _get_key_info(self):
return _get_cryptography_key_info(self.key)
def _is_key_consistent(self, key_public_data, key_private_data):
return _is_cryptography_key_consistent(self.key, key_public_data, key_private_data)
class PrivateKeyInfoPyOpenSSL(PrivateKeyInfo):
"""validate the supplied private key."""
def __init__(self, module):
super(PrivateKeyInfoPyOpenSSL, self).__init__(module, 'pyopenssl')
def _get_public_key(self, binary):
try:
return crypto.dump_publickey(
crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
self.key
)
except AttributeError:
try:
# pyOpenSSL < 16.0:
bio = crypto._new_mem_buf()
if binary:
rc = crypto._lib.i2d_PUBKEY_bio(bio, self.key._pkey)
else:
rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.key._pkey)
if rc != 1:
crypto._raise_current_error()
return crypto._bio_to_string(bio)
except AttributeError:
self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
'Please upgrade to version 16.0 or newer, or use the cryptography backend.')
def bigint_to_int(self, bn):
'''Convert an OpenSSL BIGNUM to a Python integer'''
if bn == OpenSSL._util.ffi.NULL:
return None
hexstr = OpenSSL._util.lib.BN_bn2hex(bn)
try:
return int(OpenSSL._util.ffi.string(hexstr), 16)
finally:
OpenSSL._util.lib.OPENSSL_free(hexstr)
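# Example of the conversion above (illustrative): BN_bn2hex on a BIGNUM holding
# the value 65537 returns the byte string b'10001', and int(b'10001', 16) gives
# back 65537. The finally block is needed because BN_bn2hex allocates the hex
# buffer, which must be released with OPENSSL_free.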
def _get_key_info(self):
key_public_data = dict()
key_private_data = dict()
openssl_key_type = self.key.type()
try_fallback = True
if crypto.TYPE_RSA == openssl_key_type:
key_type = 'RSA'
key_public_data['size'] = self.key.bits()
try:
# Use OpenSSL directly to extract key data
key = OpenSSL._util.lib.EVP_PKEY_get1_RSA(self.key._pkey)
key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.RSA_free)
# OpenSSL 1.1 and newer have functions to extract the parameters
# from the EVP PKEY data structures. Older versions didn't have
# these getters, and it was common practice to simply access the values
# directly. Since there's no guarantee that these data structures
# will still be accessible in the future, we use the getters for
# 1.1 and later, and directly access the values for 1.0.x and
# earlier.
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# Get modulus and exponents
n = OpenSSL._util.ffi.new("BIGNUM **")
e = OpenSSL._util.ffi.new("BIGNUM **")
d = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.RSA_get0_key(key, n, e, d)
key_public_data['modulus'] = self.bigint_to_int(n[0])
key_public_data['exponent'] = self.bigint_to_int(e[0])
key_private_data['exponent'] = self.bigint_to_int(d[0])
# Get factors
p = OpenSSL._util.ffi.new("BIGNUM **")
q = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.RSA_get0_factors(key, p, q)
key_private_data['p'] = self.bigint_to_int(p[0])
key_private_data['q'] = self.bigint_to_int(q[0])
else:
# Get modulus and exponents
key_public_data['modulus'] = self.bigint_to_int(key.n)
key_public_data['exponent'] = self.bigint_to_int(key.e)
key_private_data['exponent'] = self.bigint_to_int(key.d)
# Get factors
key_private_data['p'] = self.bigint_to_int(key.p)
key_private_data['q'] = self.bigint_to_int(key.q)
try_fallback = False
except AttributeError:
# Use fallback if available
pass
elif crypto.TYPE_DSA == openssl_key_type:
key_type = 'DSA'
key_public_data['size'] = self.key.bits()
try:
# Use OpenSSL directly to extract key data
key = OpenSSL._util.lib.EVP_PKEY_get1_DSA(self.key._pkey)
key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.DSA_free)
# OpenSSL 1.1 and newer have functions to extract the parameters
# from the EVP PKEY data structures. Older versions didn't have
# these getters, and it was common practice to simply access the values
# directly. Since there's no guarantee that these data structures
# will still be accessible in the future, we use the getters for
# 1.1 and later, and directly access the values for 1.0.x and
# earlier.
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# Get public parameters (primes and group element)
p = OpenSSL._util.ffi.new("BIGNUM **")
q = OpenSSL._util.ffi.new("BIGNUM **")
g = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.DSA_get0_pqg(key, p, q, g)
key_public_data['p'] = self.bigint_to_int(p[0])
key_public_data['q'] = self.bigint_to_int(q[0])
key_public_data['g'] = self.bigint_to_int(g[0])
# Get public and private key exponents
y = OpenSSL._util.ffi.new("BIGNUM **")
x = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.DSA_get0_key(key, y, x)
key_public_data['y'] = self.bigint_to_int(y[0])
key_private_data['x'] = self.bigint_to_int(x[0])
else:
# Get public parameters (primes and group element)
key_public_data['p'] = self.bigint_to_int(key.p)
key_public_data['q'] = self.bigint_to_int(key.q)
key_public_data['g'] = self.bigint_to_int(key.g)
# Get public and private key exponents
key_public_data['y'] = self.bigint_to_int(key.pub_key)
key_private_data['x'] = self.bigint_to_int(key.priv_key)
try_fallback = False
except AttributeError:
# Use fallback if available
pass
else:
# Return 'unknown'
key_type = 'unknown ({0})'.format(self.key.type())
# If needed and if possible, fall back to cryptography
if try_fallback and PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
return _get_cryptography_key_info(self.key.to_cryptography_key())
return key_type, key_public_data, key_private_data
def _is_key_consistent(self, key_public_data, key_private_data):
openssl_key_type = self.key.type()
if crypto.TYPE_RSA == openssl_key_type:
try:
return self.key.check()
except crypto.Error:
# OpenSSL error means that key is not consistent
return False
if crypto.TYPE_DSA == openssl_key_type:
result = _check_dsa_consistency(key_public_data, key_private_data)
if result is not None:
return result
signature = crypto.sign(self.key, SIGNATURE_TEST_DATA, 'sha256')
# Verify wants a cert (where it can get the public key from)
cert = crypto.X509()
cert.set_pubkey(self.key)
try:
crypto.verify(cert, signature, SIGNATURE_TEST_DATA, 'sha256')
return True
except crypto.Error:
return False
# If needed and if possible, fall back to cryptography
if PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
return _is_cryptography_key_consistent(self.key.to_cryptography_key(), key_public_data, key_private_data)
return None
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path'),
content=dict(type='str'),
passphrase=dict(type='str', no_log=True),
return_private_key_data=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
),
required_one_of=(
['path', 'content'],
),
mutually_exclusive=(
['path', 'content'],
),
supports_check_mode=True,
)
try:
if module.params['path'] is not None:
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg='The directory %s does not exist or is not a directory' % base_dir
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what backend we can use
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# If cryptography is available we'll use it
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Fail if no backend has been found
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
privatekey = PrivateKeyInfoPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
privatekey = PrivateKeyInfoCryptography(module)
result = privatekey.get_info()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/bad_4295_3 |
crossvul-python_data_bad_4295_6 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_certificate
short_description: Generate and/or check OpenSSL certificates
description:
- This module allows one to (re)generate OpenSSL certificates.
- It implements a notion of provider (i.e. C(selfsigned), C(ownca), C(acme), C(assertonly), C(entrust))
for your certificate.
- The C(assertonly) provider is intended for use cases where one is only interested in
checking properties of a supplied certificate. Please note that this provider has been
deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0. See the examples on how
to emulate C(assertonly) usage with M(community.crypto.x509_certificate_info),
M(community.crypto.openssl_csr_info), M(community.crypto.openssl_privatekey_info) and
M(ansible.builtin.assert). This also allows more flexible checks than
the ones offered by the C(assertonly) provider.
- The C(ownca) provider is intended for generating OpenSSL certificate signed with your own
CA (Certificate Authority) certificate (self-signed certificate).
- Many properties that can be specified in this module are for validation of an
existing or newly generated certificate. The proper place to specify them, if you
want to receive a certificate with these properties, is a CSR (Certificate Signing Request).
- "Please note that the module regenerates existing certificate if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing certificate, consider using the I(backup) option."
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL.
- If both the cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with C(select_crypto_backend)).
Please note that the PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
- Note that this module was called C(openssl_certificate) when included directly in Ansible up to version 2.9.
When moved to the collection C(community.crypto), it was renamed to
M(community.crypto.x509_certificate). From Ansible 2.10 on, it can still be used by the
old short name (or by C(ansible.builtin.openssl_certificate)), which redirects to
C(community.crypto.x509_certificate). When using FQCNs or when using the
L(collections,https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook)
keyword, the new name M(community.crypto.x509_certificate) should be used to avoid
a deprecation warning.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.6 (if using C(selfsigned) or C(assertonly) provider)
- acme-tiny >= 4.0.0 (if using the C(acme) provider)
author:
- Yanis Guenane (@Spredzy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
state:
description:
- Whether the certificate should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
path:
description:
- Remote absolute path where the generated certificate file should be created or is already located.
type: path
required: true
provider:
description:
- Name of the provider to use to generate/retrieve the OpenSSL certificate.
- The C(assertonly) provider will not generate files and will fail if the certificate file is missing.
- The C(assertonly) provider has been deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
Please see the examples on how to emulate it with
M(community.crypto.x509_certificate_info), M(community.crypto.openssl_csr_info),
M(community.crypto.openssl_privatekey_info) and M(ansible.builtin.assert).
- "The C(entrust) provider was added for Ansible 2.9 and requires credentials for the
L(Entrust Certificate Services,https://www.entrustdatacard.com/products/categories/ssl-certificates) (ECS) API."
- Required if I(state) is C(present).
type: str
choices: [ acme, assertonly, entrust, ownca, selfsigned ]
force:
description:
- Generate the certificate, even if it already exists.
type: bool
default: no
csr_path:
description:
- Path to the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_content).
type: path
csr_content:
description:
- Content of the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_path).
type: str
version_added: '1.0.0'
privatekey_path:
description:
- Path to the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_content).
type: path
privatekey_content:
description:
- Content of the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_path).
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the private key given in I(privatekey_path) or I(privatekey_content).
- This is required if the private key is password protected.
type: str
selfsigned_version:
description:
- Version of the C(selfsigned) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(selfsigned) provider.
type: int
default: 3
selfsigned_digest:
description:
- Digest algorithm to be used when self-signing the certificate.
- This is only used by the C(selfsigned) provider.
type: str
default: sha256
selfsigned_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as a relative time or as an absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(selfsigned) provider.
type: str
default: +0s
aliases: [ selfsigned_notBefore ]
selfsigned_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as a relative time or as an absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(selfsigned) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
aliases: [ selfsigned_notAfter ]
selfsigned_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(selfsigned) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_path:
description:
- Remote absolute path of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_content).
type: path
ownca_content:
description:
- Content of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_path).
type: str
version_added: '1.0.0'
ownca_privatekey_path:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_content).
type: path
ownca_privatekey_content:
description:
- Content of the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_path).
type: str
version_added: '1.0.0'
ownca_privatekey_passphrase:
description:
- The passphrase for the CA private key given in I(ownca_privatekey_path) or I(ownca_privatekey_content).
- This is only used by the C(ownca) provider.
type: str
ownca_digest:
description:
- The digest algorithm to be used for the C(ownca) certificate.
- This is only used by the C(ownca) provider.
type: str
default: sha256
ownca_version:
description:
- The version of the C(ownca) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(ownca) provider.
type: int
default: 3
ownca_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as a relative time or as an absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(ownca) provider.
type: str
default: +0s
ownca_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as a relative time or as an absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(ownca) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
ownca_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_create_authority_key_identifier:
description:
- Create an Authority Key Identifier from the CA's certificate. If the CSR provided
an authority key identifier, it is ignored.
- The Authority Key Identifier is generated from the CA certificate's Subject Key Identifier,
if available. If it is not available, the CA certificate's public key will be used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: yes
acme_accountkey_path:
description:
- The path to the accountkey for the C(acme) provider.
- This is only used by the C(acme) provider.
type: path
acme_challenge_path:
description:
- The path to the ACME challenge directory that is served on U(http://<HOST>:80/.well-known/acme-challenge/)
- This is only used by the C(acme) provider.
type: path
acme_chain:
description:
- Include the intermediate certificate in the generated certificate.
- This is only used by the C(acme) provider.
- Note that this is only available for older versions of C(acme-tiny).
New versions include the chain automatically, and setting I(acme_chain) to C(yes) results in an error.
type: bool
default: no
acme_directory:
description:
- "The ACME directory to use. You can use any directory that supports the ACME protocol, such as Buypass or Let's Encrypt."
- "Let's Encrypt recommends using their staging server while developing jobs. U(https://letsencrypt.org/docs/staging-environment/)."
type: str
default: https://acme-v02.api.letsencrypt.org/directory
version_added: '1.0.0'
signature_algorithms:
description:
- A list of algorithms that you would accept the certificate to be signed with
(e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']).
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
issuer:
description:
- The key/value pairs that must be present in the issuer name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
issuer_strict:
description:
- If set to C(yes), the I(issuer) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
subject:
description:
- The key/value pairs that must be present in the subject name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
subject_strict:
description:
- If set to C(yes), the I(subject) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
has_expired:
description:
- Checks if the certificate is expired/not expired at the time the module is executed.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
version:
description:
- The version of the certificate.
- Nowadays it should almost always be 3.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: int
valid_at:
description:
- The certificate must be valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
invalid_at:
description:
- The certificate must be invalid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
not_before:
description:
- The certificate must start to become valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notBefore ]
not_after:
description:
- The certificate must expire at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notAfter ]
valid_in:
description:
- The certificate must still be valid at this relative time offset from now.
- Valid format is C([+-]timespec | number_of_seconds) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using this parameter, this module is NOT idempotent.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
key_usage:
description:
- The I(key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ keyUsage ]
key_usage_strict:
description:
- If set to C(yes), the I(key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ keyUsage_strict ]
extended_key_usage:
description:
- The I(extended_key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ extendedKeyUsage ]
extended_key_usage_strict:
description:
- If set to C(yes), the I(extended_key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ extendedKeyUsage_strict ]
subject_alt_name:
description:
- The I(subject_alt_name) extension field must contain these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_strict:
description:
- If set to C(yes), the I(subject_alt_name) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ subjectAltName_strict ]
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
certificate back if you overwrote it with a new one by accident.
- This is not used by the C(assertonly) provider.
type: bool
default: no
entrust_cert_type:
description:
- Specify the type of certificate requested.
- This is only used by the C(entrust) provider.
type: str
default: STANDARD_SSL
choices: [ 'STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT' ]
entrust_requester_email:
description:
- The email of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_name:
description:
- The name of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_phone:
description:
- The phone number of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_user:
description:
- The username for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_key:
description:
- The key (password) for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_client_cert_path:
description:
- The path to the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_api_client_cert_key_path:
description:
- The path to the private key of the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as a relative time or as an absolute timestamp.
- A valid absolute time format is C(ASN.1 TIME) such as C(2019-06-18).
- A valid relative time format is C([+-]timespec) where timespec can be an integer + C([w | d | h | m | s]), such as C(+365d) or C(+32w1d2h).
- Time will always be interpreted as UTC.
- Note that only the date (day, month, year) is supported for specifying the expiry date of the issued certificate.
- The full date-time is adjusted to EST (GMT -5:00) before issuance, which may result in a certificate with an expiration date one day
earlier than expected if a relative time is used.
- The minimum certificate lifetime is 90 days, and the maximum is three years.
- If this value is not specified, the certificate will stop being valid 365 days from the date of issue.
- This is only used by the C(entrust) provider.
type: str
default: +365d
entrust_api_specification_path:
description:
- The path to the specification file defining the Entrust Certificate Services (ECS) API configuration.
- You can use this to keep a local copy of the specification to avoid downloading it every time the module is used.
- This is only used by the C(entrust) provider.
type: path
default: https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml
return_content:
description:
- If set to C(yes), will return the (current or generated) certificate's content as I(certificate).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment: files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Dates must be specified in UTC. Minutes and seconds are mandatory.
- For security reasons, when you use the C(ownca) provider, you should NOT run
M(community.crypto.x509_certificate) on a target machine, but on a dedicated CA machine. It
is recommended not to store the CA private key on the target machine. Once signed, the
certificate can be moved to the target machine.
seealso:
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate a Self Signed OpenSSL certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
privatekey_path: /etc/ssl/private/ansible.com.pem
csr_path: /etc/ssl/csr/ansible.com.csr
provider: selfsigned
- name: Generate an OpenSSL certificate signed with your own CA certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
ownca_path: /etc/ssl/crt/ansible_CA.crt
ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
provider: ownca
- name: Generate a Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
- name: Force (re-)generate a new Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
force: yes
- name: Generate an Entrust certificate via the Entrust Certificate Services (ECS) API
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: entrust
entrust_requester_name: Jo Doe
entrust_requester_email: jdoe@ansible.com
entrust_requester_phone: 555-555-5555
entrust_cert_type: STANDARD_SSL
entrust_api_user: apiusername
entrust_api_key: a^lv*32!cd9LnT
entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt
entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-key.crt
entrust_api_specification_path: /etc/ssl/entrust/api-docs/cms-api-2.1.0.yaml
# The following example shows one assertonly usage using all existing options for
# assertonly, and shows how to emulate the behavior with the x509_certificate_info,
# openssl_csr_info, openssl_privatekey_info and assert modules:
- community.crypto.x509_certificate:
provider: assertonly
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
privatekey_path: /etc/ssl/csr/ansible.com.key
signature_algorithms:
- sha256WithRSAEncryption
- sha512WithRSAEncryption
subject:
commonName: ansible.com
subject_strict: yes
issuer:
commonName: ansible.com
issuer_strict: yes
has_expired: no
version: 3
key_usage:
- Data Encipherment
key_usage_strict: yes
extended_key_usage:
- DVCS
extended_key_usage_strict: yes
subject_alt_name:
- dns:ansible.com
subject_alt_name_strict: yes
not_before: 20190331202428Z
not_after: 20190413202428Z
valid_at: "+1d10h"
invalid_at: 20200331202428Z
valid_in: 10 # in ten seconds
- community.crypto.x509_certificate_info:
path: /etc/ssl/crt/ansible.com.crt
# for valid_at, invalid_at and valid_in
valid_at:
one_day_ten_hours: "+1d10h"
fixed_timestamp: 20200331202428Z
ten_seconds: "+10"
register: result
- community.crypto.openssl_csr_info:
# Verifies that the CSR signature is valid; module will fail if not
path: /etc/ssl/csr/ansible.com.csr
register: result_csr
- community.crypto.openssl_privatekey_info:
path: /etc/ssl/csr/ansible.com.key
register: result_privatekey
- assert:
that:
# When private key is specified for assertonly, this will be checked:
- result.public_key == result_privatekey.public_key
# When CSR is specified for assertonly, this will be checked:
- result.public_key == result_csr.public_key
- result.subject_ordered == result_csr.subject_ordered
- result.extensions_by_oid == result_csr.extensions_by_oid
# signature_algorithms check
- "result.signature_algorithm == 'sha256WithRSAEncryption' or result.signature_algorithm == 'sha512WithRSAEncryption'"
# subject and subject_strict
- "result.subject.commonName == 'ansible.com'"
- "result.subject | length == 1" # the number must be the number of entries you check for
# issuer and issuer_strict
- "result.issuer.commonName == 'ansible.com'"
- "result.issuer | length == 1" # the number must be the number of entries you check for
# has_expired
- not result.expired
# version
- result.version == 3
# key_usage and key_usage_strict
- "'Data Encipherment' in result.key_usage"
- "result.key_usage | length == 1" # the number must be the number of entries you check for
# extended_key_usage and extended_key_usage_strict
- "'DVCS' in result.extended_key_usage"
- "result.extended_key_usage | length == 1" # the number must be the number of entries you check for
# subject_alt_name and subject_alt_name_strict
- "'dns:ansible.com' in result.subject_alt_name"
- "result.subject_alt_name | length == 1" # the number must be the number of entries you check for
# not_before and not_after
- "result.not_before == '20190331202428Z'"
- "result.not_after == '20190413202428Z'"
# valid_at, invalid_at and valid_in
- "result.valid_at.one_day_ten_hours" # for valid_at
- "not result.valid_at.fixed_timestamp" # for invalid_at
- "result.valid_at.ten_seconds" # for valid_in
# Examples for some checks one could use the assertonly provider for:
# (Please note that assertonly has been deprecated!)
# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow:
- name: Check if a certificate is currently still valid, ignoring failures
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
ignore_errors: yes
register: validity_check
- name: Run custom task(s) to get a new, valid certificate in case the initial check failed
command: superspecialSSL recreate /etc/ssl/crt/example.com.crt
when: validity_check.failed
- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
when: validity_check.failed
# Some other checks that assertonly could be used for:
- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
issuer:
O: Let's Encrypt
has_expired: no
- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA)
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
signature_algorithms:
- sha224WithRSAEncryption
- sha256WithRSAEncryption
- sha384WithRSAEncryption
- sha512WithRSAEncryption
- sha224WithECDSAEncryption
- sha256WithECDSAEncryption
- sha384WithECDSAEncryption
- sha512WithECDSAEncryption
- name: Ensure that the existing certificate belongs to the specified private key
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
privatekey_path: /etc/ssl/private/example.com.pem
provider: assertonly
- name: Ensure that the existing certificate is still valid at the winter solstice 2017
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_at: 20171221162800Z
- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_in: 1209600
- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
key_usage:
- digitalSignature
- keyEncipherment
key_usage_strict: true
- name: Ensure that the existing certificate can be used for client authentication
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- name: Ensure that the existing certificate can only be used for client authentication and time stamping
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- 1.3.6.1.5.5.7.3.8
extended_key_usage_strict: true
- name: Ensure that the existing certificate has a certain domain in its subjectAltName
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
subject_alt_name:
- www.example.com
- test.example.com
'''
RETURN = r'''
filename:
description: Path to the generated certificate.
returned: changed or success
type: str
sample: /etc/ssl/crt/www.ansible.com.crt
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.crt.2019-03-09@11:22~
certificate:
description: The (current or generated) certificate's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
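# For illustration only (hypothetical values): a result registered from this
# module might look like
#   {"changed": True,
#    "filename": "/etc/ssl/crt/www.ansible.com.crt",
#    "backup_file": "/path/to/www.ansible.com.crt.2019-03-09@11:22~",
#    "certificate": "-----BEGIN CERTIFICATE-----\n..."}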
import abc
import datetime
import time
import os
import tempfile
import traceback
from distutils.version import LooseVersion
from random import randrange
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.compat import ipaddress as compat_ipaddress
from ansible_collections.community.crypto.plugins.module_utils.ecs.api import ECSClient, RestOperationException, SessionConfigurationException
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
load_certificate_request,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_compare_public_keys,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
)
MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
MINIMAL_PYOPENSSL_VERSION = '0.15'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import NameAttribute, Name
from cryptography.x509.oid import NameOID
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class CertificateError(OpenSSLObjectError):
pass
class Certificate(OpenSSLObject):
def __init__(self, module, backend):
super(Certificate, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.provider = module.params['provider']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.csr_path = module.params['csr_path']
self.csr_content = module.params['csr_content']
if self.csr_content is not None:
self.csr_content = self.csr_content.encode('utf-8')
self.cert = None
self.privatekey = None
self.csr = None
self.backend = backend
self.module = module
self.return_content = module.params['return_content']
# The following are default values which make sure check() works as
# before if providers do not explicitly change these properties.
self.create_subject_key_identifier = 'never_create'
self.create_authority_key_identifier = False
self.backup = module.params['backup']
self.backup_file = None
def _validate_privatekey(self):
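# pyOpenSSL offers no direct "does this key match this cert" call; the
# approach used here is to load both into an SSL context and let
# check_privatekey() raise if they do not belong together.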
if self.backend == 'pyopenssl':
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey(self.privatekey)
ctx.use_certificate(self.cert)
try:
ctx.check_privatekey()
return True
except OpenSSL.SSL.Error:
return False
elif self.backend == 'cryptography':
return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
def _validate_csr(self):
if self.backend == 'pyopenssl':
# Verify that CSR is signed by certificate's private key
try:
self.csr.verify(self.cert.get_pubkey())
except OpenSSL.crypto.Error:
return False
# Check subject
if self.csr.get_subject() != self.cert.get_subject():
return False
# Check extensions
csr_extensions = self.csr.get_extensions()
cert_extension_count = self.cert.get_extension_count()
if len(csr_extensions) != cert_extension_count:
return False
for extension_number in range(0, cert_extension_count):
cert_extension = self.cert.get_extension(extension_number)
matching = [ext for ext in csr_extensions if ext.get_short_name() == cert_extension.get_short_name()]
if not matching or cert_extension.get_data() != matching[0].get_data():
return False
return True
elif self.backend == 'cryptography':
# Verify that CSR is signed by certificate's private key
if not self.csr.is_signature_valid:
return False
if not cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key()):
return False
# Check subject
if self.csr.subject != self.cert.subject:
return False
# Check extensions
cert_exts = list(self.cert.extensions)
csr_exts = list(self.csr.extensions)
if self.create_subject_key_identifier != 'never_create':
# Filter out SubjectKeyIdentifier extension before comparison
cert_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), cert_exts))
csr_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), csr_exts))
if self.create_authority_key_identifier:
# Filter out AuthorityKeyIdentifier extension before comparison
cert_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), cert_exts))
csr_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), csr_exts))
if len(cert_exts) != len(csr_exts):
return False
for cert_ext in cert_exts:
try:
csr_ext = self.csr.extensions.get_extension_for_oid(cert_ext.oid)
if cert_ext != csr_ext:
return False
except cryptography.x509.ExtensionNotFound as dummy:
return False
return True
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(Certificate, self).remove(module)
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(Certificate, self).check(module, perms_required)
if not state_and_perms:
return False
try:
self.cert = load_certificate(self.path, backend=self.backend)
except Exception as dummy:
return False
if self.privatekey_path or self.privatekey_content:
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
raise CertificateError(exc)
if not self._validate_privatekey():
return False
if self.csr_path or self.csr_content:
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
if not self._validate_csr():
return False
# Check SubjectKeyIdentifier
if self.backend == 'cryptography' and self.create_subject_key_identifier != 'never_create':
# Get hold of certificate's SKI
try:
ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except cryptography.x509.ExtensionNotFound as dummy:
return False
# Get hold of CSR's SKI for 'create_if_not_provided'
csr_ext = None
if self.create_subject_key_identifier == 'create_if_not_provided':
try:
csr_ext = self.csr.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except cryptography.x509.ExtensionNotFound as dummy:
pass
if csr_ext is None:
# If CSR had no SKI, or we chose to ignore it ('always_create'), compare with created SKI
if ext.value.digest != x509.SubjectKeyIdentifier.from_public_key(self.cert.public_key()).digest:
return False
else:
# If CSR had SKI and we didn't ignore it ('create_if_not_provided'), compare SKIs
if ext.value.digest != csr_ext.value.digest:
return False
return True
class CertificateAbsent(Certificate):
def __init__(self, module):
super(CertificateAbsent, self).__init__(module, 'cryptography') # backend doesn't matter
def generate(self, module):
pass
def dump(self, check_mode=False):
# Use only for absent
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
result['certificate'] = None
return result
class SelfSignedCertificateCryptography(Certificate):
"""Generate the self-signed certificate, using the cryptography backend"""
def __init__(self, module):
super(SelfSignedCertificateCryptography, self).__init__(module, 'cryptography')
self.create_subject_key_identifier = module.params['selfsigned_create_subject_key_identifier']
self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
self.digest = select_message_digest(module.params['selfsigned_digest'])
self.version = module.params['selfsigned_version']
self.serial_number = x509.random_serial_number()
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key file {0} does not exist'.format(self.privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
self._module = module
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=to_native(exc))
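# Ed25519/Ed448 keys hash internally, so cryptography expects algorithm=None
# when signing with them; for all other key types a digest is mandatory.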
if cryptography_key_needs_digest_for_signing(self.privatekey):
if self.digest is None:
raise CertificateError(
'The digest %s is not supported with the cryptography backend' % module.params['selfsigned_digest']
)
else:
self.digest = None
def generate(self, module):
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
try:
cert_builder = x509.CertificateBuilder()
cert_builder = cert_builder.subject_name(self.csr.subject)
cert_builder = cert_builder.issuer_name(self.csr.subject)
cert_builder = cert_builder.serial_number(self.serial_number)
cert_builder = cert_builder.not_valid_before(self.notBefore)
cert_builder = cert_builder.not_valid_after(self.notAfter)
cert_builder = cert_builder.public_key(self.privatekey.public_key())
has_ski = False
for extension in self.csr.extensions:
if isinstance(extension.value, x509.SubjectKeyIdentifier):
if self.create_subject_key_identifier == 'always_create':
continue
has_ski = True
cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
if not has_ski and self.create_subject_key_identifier != 'never_create':
cert_builder = cert_builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
critical=False
)
except ValueError as e:
raise CertificateError(str(e))
try:
certificate = cert_builder.sign(
private_key=self.privatekey, algorithm=self.digest,
backend=default_backend()
)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
self.cert = certificate
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, certificate.public_bytes(Encoding.PEM))
self.changed = True
else:
self.cert = load_certificate(self.path, backend=self.backend)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
'serial_number': cryptography_serial_number_of_cert(self.cert),
})
return result
def generate_serial_number():
"""Generate a serial number for a certificate"""
while True:
result = randrange(0, 1 << 160)
if result >= 1000:
return result
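# Note: RFC 5280 caps serial numbers at 20 octets (160 bits), hence the
# 1 << 160 upper bound; the retry loop merely skips very small values.
# A quick sanity check (illustration only):
#
#   serial = generate_serial_number()
#   assert 1000 <= serial < (1 << 160)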
class SelfSignedCertificate(Certificate):
"""Generate the self-signed certificate."""
def __init__(self, module):
super(SelfSignedCertificate, self).__init__(module, 'pyopenssl')
if module.params['selfsigned_create_subject_key_identifier'] != 'create_if_not_provided':
module.fail_json(msg='selfsigned_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
self.digest = module.params['selfsigned_digest']
self.version = module.params['selfsigned_version']
self.serial_number = generate_serial_number()
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key file {0} does not exist'.format(self.privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
)
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=str(exc))
def generate(self, module):
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert = crypto.X509()
cert.set_serial_number(self.serial_number)
cert.set_notBefore(to_bytes(self.notBefore))
cert.set_notAfter(to_bytes(self.notAfter))
cert.set_subject(self.csr.get_subject())
cert.set_issuer(self.csr.get_subject())
cert.set_version(self.version - 1)
cert.set_pubkey(self.csr.get_pubkey())
cert.add_extensions(self.csr.get_extensions())
cert.sign(self.privatekey, self.digest)
self.cert = cert
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore,
'notAfter': self.notAfter,
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.get_notBefore(),
'notAfter': self.cert.get_notAfter(),
'serial_number': self.cert.get_serial_number(),
})
return result
class OwnCACertificateCryptography(Certificate):
"""Generate the own CA certificate. Using the cryptography backend"""
def __init__(self, module):
super(OwnCACertificateCryptography, self).__init__(module, 'cryptography')
self.create_subject_key_identifier = module.params['ownca_create_subject_key_identifier']
self.create_authority_key_identifier = module.params['ownca_create_authority_key_identifier']
self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
self.digest = select_message_digest(module.params['ownca_digest'])
self.version = module.params['ownca_version']
self.serial_number = x509.random_serial_number()
self.ca_cert_path = module.params['ownca_path']
self.ca_cert_content = module.params['ownca_content']
if self.ca_cert_content is not None:
self.ca_cert_content = self.ca_cert_content.encode('utf-8')
self.ca_privatekey_path = module.params['ownca_privatekey_path']
self.ca_privatekey_content = module.params['ownca_privatekey_content']
if self.ca_privatekey_content is not None:
self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
self.ca_cert = load_certificate(
path=self.ca_cert_path,
content=self.ca_cert_content,
backend=self.backend
)
try:
self.ca_private_key = load_privatekey(
path=self.ca_privatekey_path,
content=self.ca_privatekey_content,
passphrase=self.ca_privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=str(exc))
if cryptography_key_needs_digest_for_signing(self.ca_private_key):
if self.digest is None:
raise CertificateError(
'The digest %s is not supported with the cryptography backend' % module.params['ownca_digest']
)
else:
self.digest = None
def generate(self, module):
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate %s does not exist' % self.ca_cert_path
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key %s does not exist' % self.ca_privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert_builder = x509.CertificateBuilder()
cert_builder = cert_builder.subject_name(self.csr.subject)
cert_builder = cert_builder.issuer_name(self.ca_cert.subject)
cert_builder = cert_builder.serial_number(self.serial_number)
cert_builder = cert_builder.not_valid_before(self.notBefore)
cert_builder = cert_builder.not_valid_after(self.notAfter)
cert_builder = cert_builder.public_key(self.csr.public_key())
has_ski = False
for extension in self.csr.extensions:
if isinstance(extension.value, x509.SubjectKeyIdentifier):
if self.create_subject_key_identifier == 'always_create':
continue
has_ski = True
if self.create_authority_key_identifier and isinstance(extension.value, x509.AuthorityKeyIdentifier):
continue
cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
if not has_ski and self.create_subject_key_identifier != 'never_create':
cert_builder = cert_builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(self.csr.public_key()),
critical=False
)
if self.create_authority_key_identifier:
try:
ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
cert_builder = cert_builder.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext),
critical=False
)
except cryptography.x509.ExtensionNotFound:
cert_builder = cert_builder.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key()),
critical=False
)
try:
certificate = cert_builder.sign(
private_key=self.ca_private_key, algorithm=self.digest,
backend=default_backend()
)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
self.cert = certificate
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, certificate.public_bytes(Encoding.PEM))
self.changed = True
else:
self.cert = load_certificate(self.path, backend=self.backend)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
if not super(OwnCACertificateCryptography, self).check(module, perms_required):
return False
# Check AuthorityKeyIdentifier
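# The expected AKI is derived from the CA certificate's SubjectKeyIdentifier
# when present; otherwise it is computed from the CA's public key, mirroring
# what generate() writes above.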
if self.create_authority_key_identifier:
try:
ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
expected_ext = (
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext)
)
except cryptography.x509.ExtensionNotFound:
expected_ext = x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key())
try:
ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
if ext.value != expected_ext:
return False
except cryptography.x509.ExtensionNotFound as dummy:
return False
return True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
'ca_cert': self.ca_cert_path,
'ca_privatekey': self.ca_privatekey_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
'serial_number': cryptography_serial_number_of_cert(self.cert),
})
return result
class OwnCACertificate(Certificate):
"""Generate the own CA certificate."""
def __init__(self, module):
super(OwnCACertificate, self).__init__(module, 'pyopenssl')
self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
self.digest = module.params['ownca_digest']
self.version = module.params['ownca_version']
self.serial_number = generate_serial_number()
if module.params['ownca_create_subject_key_identifier'] != 'create_if_not_provided':
module.fail_json(msg='ownca_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
if module.params['ownca_create_authority_key_identifier']:
module.warn('ownca_create_authority_key_identifier is ignored by the pyOpenSSL backend!')
self.ca_cert_path = module.params['ownca_path']
self.ca_cert_content = module.params['ownca_content']
if self.ca_cert_content is not None:
self.ca_cert_content = self.ca_cert_content.encode('utf-8')
self.ca_privatekey_path = module.params['ownca_privatekey_path']
self.ca_privatekey_content = module.params['ownca_privatekey_content']
if self.ca_privatekey_content is not None:
self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
)
self.ca_cert = load_certificate(
path=self.ca_cert_path,
content=self.ca_cert_content,
)
try:
self.ca_privatekey = load_privatekey(
path=self.ca_privatekey_path,
content=self.ca_privatekey_content,
passphrase=self.ca_privatekey_passphrase
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=str(exc))
def generate(self, module):
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate %s does not exist' % self.ca_cert_path
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key %s does not exist' % self.ca_privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert = crypto.X509()
cert.set_serial_number(self.serial_number)
cert.set_notBefore(to_bytes(self.notBefore))
cert.set_notAfter(to_bytes(self.notAfter))
cert.set_subject(self.csr.get_subject())
cert.set_issuer(self.ca_cert.get_subject())
cert.set_version(self.version - 1)
cert.set_pubkey(self.csr.get_pubkey())
cert.add_extensions(self.csr.get_extensions())
cert.sign(self.ca_privatekey, self.digest)
self.cert = cert
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
'ca_cert': self.ca_cert_path,
'ca_privatekey': self.ca_privatekey_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore,
'notAfter': self.notAfter,
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.get_notBefore(),
'notAfter': self.cert.get_notAfter(),
'serial_number': self.cert.get_serial_number(),
})
return result
def compare_sets(subset, superset, equality=False):
if equality:
return set(subset) == set(superset)
else:
return all(x in superset for x in subset)
def compare_dicts(subset, superset, equality=False):
if equality:
return subset == superset
else:
return all(superset.get(x) == v for x, v in subset.items())
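# Illustration only: these helpers implement the "*_strict" semantics of the
# assertonly checks below. Non-strict means every expected item must be
# present; strict (equality=True) additionally forbids anything extra.
# Hypothetical values:
#
#   compare_sets(['clientAuth'], ['clientAuth', 'serverAuth'])                 # True
#   compare_sets(['clientAuth'], ['clientAuth', 'serverAuth'], equality=True)  # False
#   compare_dicts({'a': 1}, {'a': 1, 'b': 2})                                  # True
#   compare_dicts({'a': 1}, {'a': 1, 'b': 2}, equality=True)                   # False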
NO_EXTENSION = 'no extension'
class AssertOnlyCertificateBase(Certificate):
def __init__(self, module, backend):
super(AssertOnlyCertificateBase, self).__init__(module, backend)
self.signature_algorithms = module.params['signature_algorithms']
if module.params['subject']:
self.subject = parse_name_field(module.params['subject'])
else:
self.subject = []
self.subject_strict = module.params['subject_strict']
if module.params['issuer']:
self.issuer = parse_name_field(module.params['issuer'])
else:
self.issuer = []
self.issuer_strict = module.params['issuer_strict']
self.has_expired = module.params['has_expired']
self.version = module.params['version']
self.key_usage = module.params['key_usage']
self.key_usage_strict = module.params['key_usage_strict']
self.extended_key_usage = module.params['extended_key_usage']
self.extended_key_usage_strict = module.params['extended_key_usage_strict']
self.subject_alt_name = module.params['subject_alt_name']
self.subject_alt_name_strict = module.params['subject_alt_name_strict']
self.not_before = module.params['not_before']
self.not_after = module.params['not_after']
self.valid_at = module.params['valid_at']
self.invalid_at = module.params['invalid_at']
self.valid_in = module.params['valid_in']
if self.valid_in and not self.valid_in.startswith("+") and not self.valid_in.startswith("-"):
try:
int(self.valid_in)
except ValueError:
module.fail_json(msg='The supplied value for "valid_in" (%s) is not an integer or a valid timespec' % self.valid_in)
self.valid_in = "+" + self.valid_in + "s"
# Load objects
self.cert = load_certificate(self.path, backend=self.backend)
if self.privatekey_path is not None or self.privatekey_content is not None:
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
raise CertificateError(exc)
if self.csr_path is not None or self.csr_content is not None:
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
@abc.abstractmethod
def _validate_privatekey(self):
pass
@abc.abstractmethod
def _validate_csr_signature(self):
pass
@abc.abstractmethod
def _validate_csr_subject(self):
pass
@abc.abstractmethod
def _validate_csr_extensions(self):
pass
@abc.abstractmethod
def _validate_signature_algorithms(self):
pass
@abc.abstractmethod
def _validate_subject(self):
pass
@abc.abstractmethod
def _validate_issuer(self):
pass
@abc.abstractmethod
def _validate_has_expired(self):
pass
@abc.abstractmethod
def _validate_version(self):
pass
@abc.abstractmethod
def _validate_key_usage(self):
pass
@abc.abstractmethod
def _validate_extended_key_usage(self):
pass
@abc.abstractmethod
def _validate_subject_alt_name(self):
pass
@abc.abstractmethod
def _validate_not_before(self):
pass
@abc.abstractmethod
def _validate_not_after(self):
pass
@abc.abstractmethod
def _validate_valid_at(self):
pass
@abc.abstractmethod
def _validate_invalid_at(self):
pass
@abc.abstractmethod
def _validate_valid_in(self):
pass
def assertonly(self, module):
messages = []
if self.privatekey_path is not None or self.privatekey_content is not None:
if not self._validate_privatekey():
messages.append(
'Certificate %s and private key %s do not match' %
(self.path, self.privatekey_path or '(provided in module options)')
)
if self.csr_path is not None or self.csr_content is not None:
if not self._validate_csr_signature():
messages.append(
'Certificate %s and CSR %s do not match: private key mismatch' %
(self.path, self.csr_path or '(provided in module options)')
)
if not self._validate_csr_subject():
messages.append(
'Certificate %s and CSR %s do not match: subject mismatch' %
(self.path, self.csr_path or '(provided in module options)')
)
if not self._validate_csr_extensions():
messages.append(
'Certificate %s and CSR %s do not match: extensions mismatch' %
(self.path, self.csr_path or '(provided in module options)')
)
if self.signature_algorithms is not None:
wrong_alg = self._validate_signature_algorithms()
if wrong_alg:
messages.append(
'Invalid signature algorithm (got %s, expected one of %s)' %
(wrong_alg, self.signature_algorithms)
)
if self.subject is not None:
failure = self._validate_subject()
if failure:
dummy, cert_subject = failure
messages.append(
'Invalid subject component (got %s, expected all of %s to be present)' %
(cert_subject, self.subject)
)
if self.issuer is not None:
failure = self._validate_issuer()
if failure:
dummy, cert_issuer = failure
messages.append(
'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer)
)
if self.has_expired is not None:
cert_expired = self._validate_has_expired()
if cert_expired != self.has_expired:
messages.append(
'Certificate expiration check failed (certificate expiration is %s, expected %s)' %
(cert_expired, self.has_expired)
)
if self.version is not None:
cert_version = self._validate_version()
if cert_version != self.version:
messages.append(
'Invalid certificate version number (got %s, expected %s)' %
(cert_version, self.version)
)
if self.key_usage is not None:
failure = self._validate_key_usage()
if failure == NO_EXTENSION:
messages.append('Found no keyUsage extension')
elif failure:
dummy, cert_key_usage = failure
messages.append(
'Invalid keyUsage components (got %s, expected all of %s to be present)' %
(cert_key_usage, self.key_usage)
)
if self.extended_key_usage is not None:
failure = self._validate_extended_key_usage()
if failure == NO_EXTENSION:
messages.append('Found no extendedKeyUsage extension')
elif failure:
dummy, ext_cert_key_usage = failure
messages.append(
'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (ext_cert_key_usage, self.extended_key_usage)
)
if self.subject_alt_name is not None:
failure = self._validate_subject_alt_name()
if failure == NO_EXTENSION:
messages.append('Found no subjectAltName extension')
elif failure:
dummy, cert_san = failure
messages.append(
'Invalid subjectAltName component (got %s, expected all of %s to be present)' %
(cert_san, self.subject_alt_name)
)
if self.not_before is not None:
cert_not_valid_before = self._validate_not_before()
if cert_not_valid_before != get_relative_time_option(self.not_before, 'not_before', backend=self.backend):
messages.append(
'Invalid not_before component (got %s, expected %s to be present)' %
(cert_not_valid_before, self.not_before)
)
if self.not_after is not None:
cert_not_valid_after = self._validate_not_after()
if cert_not_valid_after != get_relative_time_option(self.not_after, 'not_after', backend=self.backend):
messages.append(
'Invalid not_after component (got %s, expected %s to be present)' %
(cert_not_valid_after, self.not_after)
)
if self.valid_at is not None:
not_before, valid_at, not_after = self._validate_valid_at()
if not (not_before <= valid_at <= not_after):
messages.append(
'Certificate is not valid for the specified date (%s) - not_before: %s - not_after: %s' %
(self.valid_at, not_before, not_after)
)
if self.invalid_at is not None:
not_before, invalid_at, not_after = self._validate_invalid_at()
if not_before <= invalid_at <= not_after:
messages.append(
'Certificate is not invalid for the specified date (%s) - not_before: %s - not_after: %s' %
(self.invalid_at, not_before, not_after)
)
if self.valid_in is not None:
not_before, valid_in, not_after = self._validate_valid_in()
if not not_before <= valid_in <= not_after:
messages.append(
'Certificate is not valid in %s from now (that would be %s) - not_before: %s - not_after: %s' %
(self.valid_in, valid_in, not_before, not_after)
)
return messages
def generate(self, module):
"""Don't generate anything - only assert"""
messages = self.assertonly(module)
if messages:
module.fail_json(msg=' | '.join(messages))
def check(self, module, perms_required=False):
"""Ensure the resource is in its desired state."""
messages = self.assertonly(module)
return len(messages) == 0
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
}
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
return result
class AssertOnlyCertificateCryptography(AssertOnlyCertificateBase):
"""Validate the supplied cert, using the cryptography backend"""
def __init__(self, module):
super(AssertOnlyCertificateCryptography, self).__init__(module, 'cryptography')
def _validate_privatekey(self):
return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
def _validate_csr_signature(self):
if not self.csr.is_signature_valid:
return False
return cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key())
def _validate_csr_subject(self):
return self.csr.subject == self.cert.subject
def _validate_csr_extensions(self):
cert_exts = self.cert.extensions
csr_exts = self.csr.extensions
if len(cert_exts) != len(csr_exts):
return False
for cert_ext in cert_exts:
try:
csr_ext = csr_exts.get_extension_for_oid(cert_ext.oid)
if cert_ext != csr_ext:
return False
except cryptography.x509.ExtensionNotFound as dummy:
return False
return True
def _validate_signature_algorithms(self):
if self.cert.signature_algorithm_oid._name not in self.signature_algorithms:
return self.cert.signature_algorithm_oid._name
def _validate_subject(self):
expected_subject = Name([NameAttribute(oid=cryptography_name_to_oid(sub[0]), value=to_text(sub[1]))
for sub in self.subject])
cert_subject = self.cert.subject
if not compare_sets(expected_subject, cert_subject, self.subject_strict):
return expected_subject, cert_subject
def _validate_issuer(self):
expected_issuer = Name([NameAttribute(oid=cryptography_name_to_oid(iss[0]), value=to_text(iss[1]))
for iss in self.issuer])
cert_issuer = self.cert.issuer
if not compare_sets(expected_issuer, cert_issuer, self.issuer_strict):
return self.issuer, cert_issuer
def _validate_has_expired(self):
cert_not_after = self.cert.not_valid_after
cert_expired = cert_not_after < datetime.datetime.utcnow()
return cert_expired
def _validate_version(self):
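# cryptography's x509.Version enum defines only v1 and v3; "unknown" is
# returned defensively for anything else.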
if self.cert.version == x509.Version.v1:
return 1
if self.cert.version == x509.Version.v3:
return 3
return "unknown"
def _validate_key_usage(self):
try:
current_key_usage = self.cert.extensions.get_extension_for_class(x509.KeyUsage).value
test_key_usage = dict(
digital_signature=current_key_usage.digital_signature,
content_commitment=current_key_usage.content_commitment,
key_encipherment=current_key_usage.key_encipherment,
data_encipherment=current_key_usage.data_encipherment,
key_agreement=current_key_usage.key_agreement,
key_cert_sign=current_key_usage.key_cert_sign,
crl_sign=current_key_usage.crl_sign,
encipher_only=False,
decipher_only=False
)
if test_key_usage['key_agreement']:
test_key_usage.update(dict(
encipher_only=current_key_usage.encipher_only,
decipher_only=current_key_usage.decipher_only
))
key_usages = cryptography_parse_key_usage_params(self.key_usage)
if not compare_dicts(key_usages, test_key_usage, self.key_usage_strict):
return self.key_usage, [k for k, v in test_key_usage.items() if v is True]
except cryptography.x509.ExtensionNotFound:
# This is only bad if the user specified a non-empty list
if self.key_usage:
return NO_EXTENSION
def _validate_extended_key_usage(self):
try:
current_ext_keyusage = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
usages = [cryptography_name_to_oid(usage) for usage in self.extended_key_usage]
expected_ext_keyusage = x509.ExtendedKeyUsage(usages)
if not compare_sets(expected_ext_keyusage, current_ext_keyusage, self.extended_key_usage_strict):
return [eku.value for eku in expected_ext_keyusage], [eku.value for eku in current_ext_keyusage]
except cryptography.x509.ExtensionNotFound:
# This is only bad if the user specified a non-empty list
if self.extended_key_usage:
return NO_EXTENSION
def _validate_subject_alt_name(self):
try:
current_san = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
expected_san = [cryptography_get_name(san) for san in self.subject_alt_name]
if not compare_sets(expected_san, current_san, self.subject_alt_name_strict):
return self.subject_alt_name, current_san
except cryptography.x509.ExtensionNotFound:
# This is only bad if the user specified a non-empty list
if self.subject_alt_name:
return NO_EXTENSION
def _validate_not_before(self):
return self.cert.not_valid_before
def _validate_not_after(self):
return self.cert.not_valid_after
def _validate_valid_at(self):
rt = get_relative_time_option(self.valid_at, 'valid_at', backend=self.backend)
return self.cert.not_valid_before, rt, self.cert.not_valid_after
def _validate_invalid_at(self):
rt = get_relative_time_option(self.invalid_at, 'invalid_at', backend=self.backend)
return self.cert.not_valid_before, rt, self.cert.not_valid_after
def _validate_valid_in(self):
valid_in_date = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
return self.cert.not_valid_before, valid_in_date, self.cert.not_valid_after
class AssertOnlyCertificate(AssertOnlyCertificateBase):
"""validate the supplied certificate."""
def __init__(self, module):
super(AssertOnlyCertificate, self).__init__(module, 'pyopenssl')
# Ensure inputs are properly sanitized before comparison.
for param in ['signature_algorithms', 'key_usage', 'extended_key_usage',
'subject_alt_name', 'subject', 'issuer', 'not_before',
'not_after', 'valid_at', 'invalid_at']:
attr = getattr(self, param)
if isinstance(attr, list) and attr:
if isinstance(attr[0], str):
setattr(self, param, [to_bytes(item) for item in attr])
elif isinstance(attr[0], tuple):
setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr])
elif isinstance(attr, dict):
setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
elif isinstance(attr, str):
setattr(self, param, to_bytes(attr))
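# pyOpenSSL accessors return byte strings, so all expected values above are
# converted to bytes up front to keep the comparisons below type-consistent.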
def _validate_privatekey(self):
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey(self.privatekey)
ctx.use_certificate(self.cert)
try:
ctx.check_privatekey()
return True
except OpenSSL.SSL.Error:
return False
def _validate_csr_signature(self):
try:
# X509Req.verify() returns True on success and raises on a bad signature.
return self.csr.verify(self.cert.get_pubkey())
except OpenSSL.crypto.Error:
return False
def _validate_csr_subject(self):
# Return True on a match so the caller's falsy check works as intended.
return self.csr.get_subject() == self.cert.get_subject()
def _validate_csr_extensions(self):
csr_extensions = self.csr.get_extensions()
cert_extension_count = self.cert.get_extension_count()
if len(csr_extensions) != cert_extension_count:
return False
for extension_number in range(0, cert_extension_count):
cert_extension = self.cert.get_extension(extension_number)
matching = [ext for ext in csr_extensions if ext.get_short_name() == cert_extension.get_short_name()]
if not matching or cert_extension.get_data() != matching[0].get_data():
return False
return True
def _validate_signature_algorithms(self):
if self.cert.get_signature_algorithm() not in self.signature_algorithms:
return self.cert.get_signature_algorithm()
def _validate_subject(self):
expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject]
cert_subject = self.cert.get_subject().get_components()
current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject]
if not compare_sets(expected_subject, current_subject, self.subject_strict):
return expected_subject, current_subject
def _validate_issuer(self):
expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer]
cert_issuer = self.cert.get_issuer().get_components()
current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer]
if not compare_sets(expected_issuer, current_issuer, self.issuer_strict):
return self.issuer, cert_issuer
def _validate_has_expired(self):
# The following 3 lines are the same as the current PyOpenSSL code for cert.has_expired().
# Older versions of PyOpenSSL have a buggy implementation,
# so to avoid issues with those the code from a more recent release is used here.
time_string = to_native(self.cert.get_notAfter())
not_after = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
cert_expired = not_after < datetime.datetime.utcnow()
return cert_expired
def _validate_version(self):
# Version numbers in certs are off by one:
# v1: 0, v2: 1, v3: 2 ...
return self.cert.get_version() + 1
def _validate_key_usage(self):
found = False
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'keyUsage':
found = True
expected_extension = crypto.X509Extension(b"keyUsage", False, b', '.join(self.key_usage))
key_usage = [usage.strip() for usage in to_text(expected_extension, errors='surrogate_or_strict').split(',')]
current_ku = [usage.strip() for usage in to_text(extension, errors='surrogate_or_strict').split(',')]
if not compare_sets(key_usage, current_ku, self.key_usage_strict):
return self.key_usage, str(extension).split(', ')
if not found:
# This is only bad if the user specified a non-empty list
if self.key_usage:
return NO_EXTENSION
def _validate_extended_key_usage(self):
found = False
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'extendedKeyUsage':
found = True
extKeyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.extended_key_usage]
current_xku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
to_bytes(extension, errors='surrogate_or_strict').split(b',')]
if not compare_sets(extKeyUsage, current_xku, self.extended_key_usage_strict):
return self.extended_key_usage, str(extension).split(', ')
if not found:
# This is only bad if the user specified a non-empty list
if self.extended_key_usage:
return NO_EXTENSION
def _validate_subject_alt_name(self):
found = False
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'subjectAltName':
found = True
l_altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
to_text(extension, errors='surrogate_or_strict').split(', ')]
sans = [pyopenssl_normalize_name_attribute(to_text(san, errors='surrogate_or_strict')) for san in self.subject_alt_name]
if not compare_sets(sans, l_altnames, self.subject_alt_name_strict):
return self.subject_alt_name, l_altnames
if not found:
# This is only bad if the user specified a non-empty list
if self.subject_alt_name:
return NO_EXTENSION
def _validate_not_before(self):
return self.cert.get_notBefore()
def _validate_not_after(self):
return self.cert.get_notAfter()
def _validate_valid_at(self):
rt = get_relative_time_option(self.valid_at, "valid_at", backend=self.backend)
rt = to_bytes(rt, errors='surrogate_or_strict')
return self.cert.get_notBefore(), rt, self.cert.get_notAfter()
def _validate_invalid_at(self):
rt = get_relative_time_option(self.invalid_at, "invalid_at", backend=self.backend)
rt = to_bytes(rt, errors='surrogate_or_strict')
return self.cert.get_notBefore(), rt, self.cert.get_notAfter()
def _validate_valid_in(self):
valid_in_asn1 = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
valid_in_date = to_bytes(valid_in_asn1, errors='surrogate_or_strict')
return self.cert.get_notBefore(), valid_in_date, self.cert.get_notAfter()
class EntrustCertificate(Certificate):
"""Retrieve a certificate using Entrust (ECS)."""
def __init__(self, module, backend):
super(EntrustCertificate, self).__init__(module, backend)
self.trackingId = None
self.notAfter = get_relative_time_option(module.params['entrust_not_after'], 'entrust_not_after', backend=self.backend)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend,
)
# ECS API defaults to using the validated organization tied to the account.
# We want to always force behavior of trying to use the organization provided in the CSR.
# To that end we need to parse out the organization from the CSR.
self.csr_org = None
if self.backend == 'pyopenssl':
csr_subject = self.csr.get_subject()
csr_subject_components = csr_subject.get_components()
for k, v in csr_subject_components:
if k.upper() == 'O':
# Entrust does not support multiple validated organizations in a single certificate
if self.csr_org is not None:
module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
"Subject DN: '{0}'. ".format(csr_subject)))
else:
self.csr_org = v
elif self.backend == 'cryptography':
csr_subject_orgs = self.csr.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
if len(csr_subject_orgs) == 1:
self.csr_org = csr_subject_orgs[0].value
elif len(csr_subject_orgs) > 1:
module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
"Subject DN: '{0}'. ".format(self.csr.subject)))
# If no organization in the CSR, explicitly tell ECS that it should be blank in issued cert, not defaulted to
# organization tied to the account.
if self.csr_org is None:
self.csr_org = ''
try:
self.ecs_client = ECSClient(
entrust_api_user=module.params.get('entrust_api_user'),
entrust_api_key=module.params.get('entrust_api_key'),
entrust_api_cert=module.params.get('entrust_api_client_cert_path'),
entrust_api_cert_key=module.params.get('entrust_api_client_cert_key_path'),
entrust_api_specification_path=module.params.get('entrust_api_specification_path')
)
except SessionConfigurationException as e:
module.fail_json(msg='Failed to initialize Entrust Provider: {0}'.format(to_native(e.message)))
def generate(self, module):
if not self.check(module, perms_required=False) or self.force:
# Read the CSR that was generated for us
body = {}
if self.csr_content is not None:
body['csr'] = self.csr_content
else:
with open(self.csr_path, 'r') as csr_file:
body['csr'] = csr_file.read()
body['certType'] = module.params['entrust_cert_type']
# Handle expiration (365 days if not specified)
expiry = self.notAfter
if not expiry:
gmt_now = datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))
expiry = gmt_now + datetime.timedelta(days=365)
expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
body['certExpiryDate'] = expiry_iso3339
body['org'] = self.csr_org
body['tracking'] = {
'requesterName': module.params['entrust_requester_name'],
'requesterEmail': module.params['entrust_requester_email'],
'requesterPhone': module.params['entrust_requester_phone'],
}
try:
result = self.ecs_client.NewCertRequest(Body=body)
self.trackingId = result.get('trackingId')
except RestOperationException as e:
module.fail_json(msg='Failed to request new certificate from Entrust Certificate Services (ECS): {0}'.format(to_native(e.message)))
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, to_bytes(result.get('endEntityCert')))
self.cert = load_certificate(self.path, backend=self.backend)
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
parent_check = super(EntrustCertificate, self).check(module, perms_required)
try:
cert_details = self._get_cert_details()
except RestOperationException as e:
module.fail_json(msg='Failed to get status of existing certificate from Entrust Certificate Services (ECS): {0}.'.format(to_native(e.message)))
# Always issue a new certificate if the certificate is expired, suspended or revoked
status = cert_details.get('status', False)
if status == 'EXPIRED' or status == 'SUSPENDED' or status == 'REVOKED':
return False
# If the requested cert type was specified and it is for a different certificate type than the initial certificate, a new one is needed
if module.params['entrust_cert_type'] and cert_details.get('certType') and module.params['entrust_cert_type'] != cert_details.get('certType'):
return False
return parent_check
def _get_cert_details(self):
cert_details = {}
if self.cert:
serial_number = None
expiry = None
if self.backend == 'pyopenssl':
serial_number = "{0:X}".format(self.cert.get_serial_number())
time_string = to_native(self.cert.get_notAfter())
expiry = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
elif self.backend == 'cryptography':
serial_number = "{0:X}".format(cryptography_serial_number_of_cert(self.cert))
expiry = self.cert.not_valid_after
# get some information about the expiry of this certificate
expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
cert_details['expiresAfter'] = expiry_iso3339
# If a trackingId is not already defined (from the result of a generate)
# use the serial number to identify the tracking Id
if self.trackingId is None and serial_number is not None:
cert_results = self.ecs_client.GetCertificates(serialNumber=serial_number).get('certificates', {})
# Finding zero or more than one result is very unlikely; it simply means we cannot perform additional
# checks on the 'status' as returned by Entrust Certificate Services (ECS). The general certificate
# validity is still checked as it is in the rest of the module.
if len(cert_results) == 1:
self.trackingId = cert_results[0].get('trackingId')
if self.trackingId is not None:
cert_details.update(self.ecs_client.GetCertificate(trackingId=self.trackingId))
return cert_details
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
result.update(self._get_cert_details())
return result
class AcmeCertificate(Certificate):
"""Retrieve a certificate using the ACME protocol."""
# Since there's no real use of the backend,
# other than the 'self.check' function, we just pass the backend to the constructor
def __init__(self, module, backend):
super(AcmeCertificate, self).__init__(module, backend)
self.accountkey_path = module.params['acme_accountkey_path']
self.challenge_path = module.params['acme_challenge_path']
self.use_chain = module.params['acme_chain']
self.acme_directory = module.params['acme_directory']
def generate(self, module):
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not os.path.exists(self.accountkey_path):
raise CertificateError(
'The account key %s does not exist' % self.accountkey_path
)
if not os.path.exists(self.challenge_path):
raise CertificateError(
'The challenge path %s does not exist' % self.challenge_path
)
if not self.check(module, perms_required=False) or self.force:
acme_tiny_path = self.module.get_bin_path('acme-tiny', required=True)
command = [acme_tiny_path]
if self.use_chain:
command.append('--chain')
command.extend(['--account-key', self.accountkey_path])
if self.csr_content is not None:
# We need to temporarily write the CSR to disk
fd, tmpsrc = tempfile.mkstemp()
module.add_cleanup_file(tmpsrc) # Ansible will delete the file on exit
f = os.fdopen(fd, 'wb')
try:
f.write(self.csr_content)
except Exception as err:
try:
f.close()
except Exception as dummy:
pass
module.fail_json(
msg="failed to create temporary CSR file: %s" % to_native(err),
exception=traceback.format_exc()
)
f.close()
command.extend(['--csr', tmpsrc])
else:
command.extend(['--csr', self.csr_path])
command.extend(['--acme-dir', self.challenge_path])
command.extend(['--directory-url', self.acme_directory])
try:
crt = module.run_command(command, check_rc=True)[1]
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, to_bytes(crt))
self.changed = True
except OSError as exc:
raise CertificateError(exc)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'accountkey': self.accountkey_path,
'csr': self.csr_path,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
path=dict(type='path', required=True),
provider=dict(type='str', choices=['acme', 'assertonly', 'entrust', 'ownca', 'selfsigned']),
force=dict(type='bool', default=False,),
csr_path=dict(type='path'),
csr_content=dict(type='str'),
backup=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
return_content=dict(type='bool', default=False),
# General properties of a certificate
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
privatekey_passphrase=dict(type='str', no_log=True),
# provider: assertonly
signature_algorithms=dict(type='list', elements='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
issuer=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
issuer_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
has_expired=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
version=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
key_usage=dict(type='list', elements='str', aliases=['keyUsage'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
key_usage_strict=dict(type='bool', default=False, aliases=['keyUsage_strict'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
extended_key_usage=dict(type='list', elements='str', aliases=['extendedKeyUsage'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
extended_key_usage_strict=dict(type='bool', default=False, aliases=['extendedKeyUsage_strict'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject_alt_name_strict=dict(type='bool', default=False, aliases=['subjectAltName_strict'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
not_before=dict(type='str', aliases=['notBefore'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
not_after=dict(type='str', aliases=['notAfter'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
valid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
invalid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
valid_in=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
# provider: selfsigned
selfsigned_version=dict(type='int', default=3),
selfsigned_digest=dict(type='str', default='sha256'),
selfsigned_not_before=dict(type='str', default='+0s', aliases=['selfsigned_notBefore']),
selfsigned_not_after=dict(type='str', default='+3650d', aliases=['selfsigned_notAfter']),
selfsigned_create_subject_key_identifier=dict(
type='str',
default='create_if_not_provided',
choices=['create_if_not_provided', 'always_create', 'never_create']
),
# provider: ownca
ownca_path=dict(type='path'),
ownca_content=dict(type='str'),
ownca_privatekey_path=dict(type='path'),
ownca_privatekey_content=dict(type='str'),
ownca_privatekey_passphrase=dict(type='str', no_log=True),
ownca_digest=dict(type='str', default='sha256'),
ownca_version=dict(type='int', default=3),
ownca_not_before=dict(type='str', default='+0s'),
ownca_not_after=dict(type='str', default='+3650d'),
ownca_create_subject_key_identifier=dict(
type='str',
default='create_if_not_provided',
choices=['create_if_not_provided', 'always_create', 'never_create']
),
ownca_create_authority_key_identifier=dict(type='bool', default=True),
# provider: acme
acme_accountkey_path=dict(type='path'),
acme_challenge_path=dict(type='path'),
acme_chain=dict(type='bool', default=False),
acme_directory=dict(type='str', default="https://acme-v02.api.letsencrypt.org/directory"),
# provider: entrust
entrust_cert_type=dict(type='str', default='STANDARD_SSL',
choices=['STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL',
'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT']),
entrust_requester_email=dict(type='str'),
entrust_requester_name=dict(type='str'),
entrust_requester_phone=dict(type='str'),
entrust_api_user=dict(type='str'),
entrust_api_key=dict(type='str', no_log=True),
entrust_api_client_cert_path=dict(type='path'),
entrust_api_client_cert_key_path=dict(type='path', no_log=True),
entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
entrust_not_after=dict(type='str', default='+365d'),
),
supports_check_mode=True,
add_file_common_args=True,
required_if=[
['state', 'present', ['provider']],
['provider', 'entrust', ['entrust_requester_email', 'entrust_requester_name', 'entrust_requester_phone',
'entrust_api_user', 'entrust_api_key', 'entrust_api_client_cert_path',
'entrust_api_client_cert_key_path']],
],
mutually_exclusive=[
['csr_path', 'csr_content'],
['privatekey_path', 'privatekey_content'],
['ownca_path', 'ownca_content'],
['ownca_privatekey_path', 'ownca_privatekey_content'],
],
)
if module._name == 'community.crypto.openssl_certificate':
module.deprecate("The 'community.crypto.openssl_certificate' module has been renamed to 'community.crypto.x509_certificate'",
version='2.0.0', collection_name='community.crypto')
try:
if module.params['state'] == 'absent':
certificate = CertificateAbsent(module)
else:
if module.params['provider'] != 'assertonly' and module.params['csr_path'] is None and module.params['csr_content'] is None:
module.fail_json(msg='csr_path or csr_content is required when provider is not assertonly')
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg='The directory %s does not exist or the path is not a directory' % base_dir
)
provider = module.params['provider']
if provider == 'assertonly':
module.deprecate("The 'assertonly' provider is deprecated; please see the examples of "
"the 'x509_certificate' module on how to replace it with other modules",
version='2.0.0', collection_name='community.crypto')
elif provider == 'selfsigned':
if module.params['privatekey_path'] is None and module.params['privatekey_content'] is None:
module.fail_json(msg='One of privatekey_path and privatekey_content must be specified for the selfsigned provider.')
elif provider == 'acme':
if module.params['acme_accountkey_path'] is None:
module.fail_json(msg='The acme_accountkey_path option must be specified for the acme provider.')
if module.params['acme_challenge_path'] is None:
module.fail_json(msg='The acme_challenge_path option must be specified for the acme provider.')
elif provider == 'ownca':
if module.params['ownca_path'] is None and module.params['ownca_content'] is None:
module.fail_json(msg='One of ownca_path and ownca_content must be specified for the ownca provider.')
if module.params['ownca_privatekey_path'] is None and module.params['ownca_privatekey_content'] is None:
module.fail_json(msg='One of ownca_privatekey_path and ownca_privatekey_content must be specified for the ownca provider.')
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what backend we can use
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# If cryptography is available we'll use it
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
module.warn('crypto backend forced to pyopenssl. The cryptography library does not support v2 certificates')
backend = 'pyopenssl'
# Fail if no backend has been found
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
if module.params['provider'] in ['selfsigned', 'ownca', 'assertonly']:
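# X509Req.get_extensions() was added in pyOpenSSL 0.15; probe for it so we
# can give a clear error message on older releases.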
try:
getattr(crypto.X509Req, 'get_extensions')
except AttributeError:
module.fail_json(msg='You need to have PyOpenSSL>=0.15')
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
if provider == 'selfsigned':
certificate = SelfSignedCertificate(module)
elif provider == 'acme':
certificate = AcmeCertificate(module, 'pyopenssl')
elif provider == 'ownca':
certificate = OwnCACertificate(module)
elif provider == 'entrust':
certificate = EntrustCertificate(module, 'pyopenssl')
else:
certificate = AssertOnlyCertificate(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
module.fail_json(msg='The cryptography backend does not support v2 certificates, '
'use select_crypto_backend=pyopenssl for v2 certificates')
if provider == 'selfsigned':
certificate = SelfSignedCertificateCryptography(module)
elif provider == 'acme':
certificate = AcmeCertificate(module, 'cryptography')
elif provider == 'ownca':
certificate = OwnCACertificateCryptography(module)
elif provider == 'entrust':
certificate = EntrustCertificate(module, 'cryptography')
else:
certificate = AssertOnlyCertificateCryptography(module)
if module.params['state'] == 'present':
if module.check_mode:
result = certificate.dump(check_mode=True)
result['changed'] = module.params['force'] or not certificate.check(module)
module.exit_json(**result)
certificate.generate(module)
else:
if module.check_mode:
result = certificate.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
certificate.remove(module)
result = certificate.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_csr
short_description: Generate OpenSSL Certificate Signing Request (CSR)
description:
- This module allows one to (re)generate OpenSSL certificate signing requests.
- It uses the cryptography or pyOpenSSL Python library to interact with OpenSSL. This module supports
the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple
extensions.
- "Please note that the module regenerates existing CSR if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing CSR, consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.3
- Or pyOpenSSL >= 0.15
author:
- Yanis Guenane (@Spredzy)
options:
state:
description:
- Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
digest:
description:
- The digest used when signing the certificate signing request with the private key.
type: str
default: sha256
privatekey_path:
description:
- The path to the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
version_added: "1.0.0"
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
version:
description:
- The version of the certificate signing request.
- "The only allowed value according to L(RFC 2986,https://tools.ietf.org/html/rfc2986#section-4.1)
is 1."
- This option will no longer accept unsupported values from Ansible 2.14 on.
type: int
default: 1
force:
description:
- Should the certificate signing request be forcibly regenerated by this Ansible module.
type: bool
default: no
path:
description:
- The name of the file into which the generated OpenSSL certificate signing request will be written.
type: path
required: true
subject:
description:
- Key/value pairs that will be present in the subject name field of the certificate signing request.
- If you need to specify more than one value with the same key, use a list as value.
type: dict
country_name:
description:
- The countryName field of the certificate signing request subject.
type: str
aliases: [ C, countryName ]
state_or_province_name:
description:
- The stateOrProvinceName field of the certificate signing request subject.
type: str
aliases: [ ST, stateOrProvinceName ]
locality_name:
description:
- The localityName field of the certificate signing request subject.
type: str
aliases: [ L, localityName ]
organization_name:
description:
- The organizationName field of the certificate signing request subject.
type: str
aliases: [ O, organizationName ]
organizational_unit_name:
description:
- The organizationalUnitName field of the certificate signing request subject.
type: str
aliases: [ OU, organizationalUnitName ]
common_name:
description:
- The commonName field of the certificate signing request subject.
type: str
aliases: [ CN, commonName ]
email_address:
description:
- The emailAddress field of the certificate signing request subject.
type: str
aliases: [ E, emailAddress ]
subject_alt_name:
description:
- SAN extension to attach to the certificate signing request.
- This can either be a 'comma separated string' or a YAML list.
- Values must be prefixed by their type. (e.g., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
- Note that if no SAN is specified, but a common name is, the common
name will be added as a SAN, unless C(useCommonNameForSAN) is
set to I(false).
- More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_critical:
description:
- Should the subjectAltName extension be considered as critical.
type: bool
aliases: [ subjectAltName_critical ]
use_common_name_for_san:
description:
- If set to C(yes), the module will copy the common name into
C(subject_alt_name) with a C(DNS:) prefix if no SAN is specified.
type: bool
default: yes
aliases: [ useCommonNameForSAN ]
key_usage:
description:
- This defines the purpose (e.g. encipherment, signature, certificate signing)
of the key contained in the certificate.
type: list
elements: str
aliases: [ keyUsage ]
key_usage_critical:
description:
- Should the keyUsage extension be considered as critical.
type: bool
aliases: [ keyUsage_critical ]
extended_key_usage:
description:
- Additional restrictions (e.g. client authentication, server authentication)
on the allowed purposes for which the public key may be used.
type: list
elements: str
aliases: [ extKeyUsage, extendedKeyUsage ]
extended_key_usage_critical:
description:
- Should the extkeyUsage extension be considered as critical.
type: bool
aliases: [ extKeyUsage_critical, extendedKeyUsage_critical ]
basic_constraints:
description:
- Indicates basic constraints, such as if the certificate is a CA.
type: list
elements: str
aliases: [ basicConstraints ]
basic_constraints_critical:
description:
- Should the basicConstraints extension be considered as critical.
type: bool
aliases: [ basicConstraints_critical ]
ocsp_must_staple:
description:
- Indicates that the certificate should contain the OCSP Must Staple
extension (U(https://tools.ietf.org/html/rfc7633)).
type: bool
aliases: [ ocspMustStaple ]
ocsp_must_staple_critical:
description:
- Should the OCSP Must Staple extension be considered as critical.
- Note that according to the RFC, this extension should not be marked
as critical, as old clients that do not know about OCSP Must Staple
are required to reject such certificates
(see U(https://tools.ietf.org/html/rfc7633#section-4)).
type: bool
aliases: [ ocspMustStaple_critical ]
name_constraints_permitted:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is allowed to issue certificates for.
- Values must be prefixed by their type. (e.g., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_excluded:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is B(not) allowed to issue certificates for.
- Values must be prefixed by their type. (e.g., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_critical:
description:
- Should the Name Constraints extension be considered as critical.
type: bool
version_added: 1.1.0
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
CSR back if you overwrote it with a new one by accident.
type: bool
default: no
create_subject_key_identifier:
description:
- Create the Subject Key Identifier from the public key.
- "Please note that commercial CAs can ignore the value, respectively use a value of
their own choice instead. Specifying this option is mostly useful for self-signed
certificates or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: no
subject_key_identifier:
description:
- The subject key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this option can only be used if I(create_subject_key_identifier) is C(no).
- Note that this is only supported if the C(cryptography) backend is used!
type: str
authority_key_identifier:
description:
- The authority key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- If specified, I(authority_cert_issuer) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: str
authority_cert_issuer:
description:
- Names that will be present in the authority cert issuer field of the certificate signing request.
- Values must be prefixed by their type. (e.g., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
- "Example: C(DNS:ca.example.org)"
- If specified, I(authority_key_identifier) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: list
elements: str
authority_cert_serial_number:
description:
- The authority cert serial number.
- Note that this is only supported if the C(cryptography) backend is used!
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: int
return_content:
description:
- If set to C(yes), will return the (current or generated) CSR's content as I(csr).
type: bool
default: no
version_added: "1.0.0"
extends_documentation_fragment:
- files
notes:
- If the certificate signing request already exists, the module checks whether subjectAltName,
keyUsage, extendedKeyUsage and basicConstraints contain only the requested values, whether
OCSP Must Staple is as requested, and whether the request was signed by the given private key.
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with an inline key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_content: "{{ private_key_content }}"
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with a passphrase protected private key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with Subject information
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
country_name: FR
organization_name: Ansible
email_address: jdoe@ansible.com
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with subjectAltName extension
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com'
- name: Generate an OpenSSL CSR with subjectAltName extension with dynamic list
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: "{{ item.value | map('regex_replace', '^', 'DNS:') | list }}"
with_dict:
dns_server:
- www.ansible.com
- m.ansible.com
- name: Force regenerate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with special key usages
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
key_usage:
- digitalSignature
- keyAgreement
extended_key_usage:
- clientAuth
- name: Generate an OpenSSL Certificate Signing Request with OCSP Must Staple
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
ocsp_must_staple: yes
- name: Generate an OpenSSL Certificate Signing Request for WinRM Certificate authentication
community.crypto.openssl_csr:
path: /etc/ssl/csr/winrm.auth.csr
privatekey_path: /etc/ssl/private/winrm.auth.pem
common_name: username
extended_key_usage:
- clientAuth
subject_alt_name: otherName:1.3.6.1.4.1.311.20.2.3;UTF8:username@localhost
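# A hypothetical CA example (paths and names are illustrative) combining
# basic constraints with the name constraints options added in 1.1.0:
- name: Generate an OpenSSL Certificate Signing Request for a constrained CA
community.crypto.openssl_csr:
path: /etc/ssl/csr/ca.ansible.com.csr
privatekey_path: /etc/ssl/private/ca.ansible.com.pem
common_name: ca.ansible.com
basic_constraints:
- 'CA:TRUE'
name_constraints_permitted:
- 'DNS:.ansible.com'
name_constraints_excluded:
- 'DNS:.example.com'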
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the CSR was generated for
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
filename:
description: Path to the generated Certificate Signing Request
returned: changed or success
type: str
sample: /etc/ssl/csr/www.ansible.com.csr
subject:
description: A list of the subject tuples attached to the CSR
returned: changed or success
type: list
elements: list
sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]"
subjectAltName:
description: The alternative names this CSR is valid for
returned: changed or success
type: list
elements: str
sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ]
keyUsage:
description: Purpose for which the public key may be used
returned: changed or success
type: list
elements: str
sample: [ 'digitalSignature', 'keyAgreement' ]
extendedKeyUsage:
description: Additional restriction on the public key purposes
returned: changed or success
type: list
elements: str
sample: [ 'clientAuth' ]
basicConstraints:
description: Indicates if the certificate belongs to a CA
returned: changed or success
type: list
elements: str
sample: ['CA:TRUE', 'pathLenConstraint:0']
ocsp_must_staple:
description: Indicates whether the certificate has the OCSP
Must Staple feature enabled
returned: changed or success
type: bool
sample: false
name_constraints_permitted:
description: List of permitted subtrees to sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.somedomain.com']
version_added: 1.1.0
name_constraints_excluded:
description: List of excluded subtrees the CA cannot sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.com']
version_added: 1.1.0
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.csr.2019-03-09@11:22~
csr:
description: The (current or generated) CSR's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: "1.0.0"
'''
import abc
import binascii
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate_request,
parse_name_field,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_basic_constraints,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
pyopenssl_parse_name_constraints,
)
MINIMAL_PYOPENSSL_VERSION = '0.15'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.3'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# OpenSSL 1.1.0 or newer
OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
OPENSSL_MUST_STAPLE_VALUE = b"status_request"
else:
# OpenSSL 1.0.x or older
OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.x509
import cryptography.x509.oid
import cryptography.exceptions
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.hashes
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
CRYPTOGRAPHY_MUST_STAPLE_NAME = cryptography.x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
CRYPTOGRAPHY_MUST_STAPLE_VALUE = b"\x30\x03\x02\x01\x05"
class CertificateSigningRequestError(OpenSSLObjectError):
pass
class CertificateSigningRequestBase(OpenSSLObject):
def __init__(self, module):
super(CertificateSigningRequestBase, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.digest = module.params['digest']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.version = module.params['version']
self.subjectAltName = module.params['subject_alt_name']
self.subjectAltName_critical = module.params['subject_alt_name_critical']
self.keyUsage = module.params['key_usage']
self.keyUsage_critical = module.params['key_usage_critical']
self.extendedKeyUsage = module.params['extended_key_usage']
self.extendedKeyUsage_critical = module.params['extended_key_usage_critical']
self.basicConstraints = module.params['basic_constraints']
self.basicConstraints_critical = module.params['basic_constraints_critical']
self.ocspMustStaple = module.params['ocsp_must_staple']
self.ocspMustStaple_critical = module.params['ocsp_must_staple_critical']
self.name_constraints_permitted = module.params['name_constraints_permitted'] or []
self.name_constraints_excluded = module.params['name_constraints_excluded'] or []
self.name_constraints_critical = module.params['name_constraints_critical']
self.create_subject_key_identifier = module.params['create_subject_key_identifier']
self.subject_key_identifier = module.params['subject_key_identifier']
self.authority_key_identifier = module.params['authority_key_identifier']
self.authority_cert_issuer = module.params['authority_cert_issuer']
self.authority_cert_serial_number = module.params['authority_cert_serial_number']
self.request = None
self.privatekey = None
self.csr_bytes = None
self.return_content = module.params['return_content']
if self.create_subject_key_identifier and self.subject_key_identifier is not None:
module.fail_json(msg='subject_key_identifier cannot be specified if create_subject_key_identifier is true')
self.backup = module.params['backup']
self.backup_file = None
self.subject = [
('C', module.params['country_name']),
('ST', module.params['state_or_province_name']),
('L', module.params['locality_name']),
('O', module.params['organization_name']),
('OU', module.params['organizational_unit_name']),
('CN', module.params['common_name']),
('emailAddress', module.params['email_address']),
]
if module.params['subject']:
self.subject = self.subject + parse_name_field(module.params['subject'])
self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]]
self.using_common_name_for_san = False
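# If no SAN was requested, fall back to the common name as a DNS SAN
# (modern TLS clients match hostnames against the SAN, not the CN).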
if not self.subjectAltName and module.params['use_common_name_for_san']:
for sub in self.subject:
if sub[0] in ('commonName', 'CN'):
self.subjectAltName = ['DNS:%s' % sub[1]]
self.using_common_name_for_san = True
break
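# Key identifiers arrive as colon-separated hex (e.g. '00:11:22:...');
# strip the colons and decode them to raw bytes.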
if self.subject_key_identifier is not None:
try:
self.subject_key_identifier = binascii.unhexlify(self.subject_key_identifier.replace(':', ''))
except Exception as e:
raise CertificateSigningRequestError('Cannot parse subject_key_identifier: {0}'.format(e))
if self.authority_key_identifier is not None:
try:
self.authority_key_identifier = binascii.unhexlify(self.authority_key_identifier.replace(':', ''))
except Exception as e:
raise CertificateSigningRequestError('Cannot parse authority_key_identifier: {0}'.format(e))
@abc.abstractmethod
def _generate_csr(self):
pass
def generate(self, module):
'''Generate the certificate signing request.'''
if not self.check(module, perms_required=False) or self.force:
result = self._generate_csr()
if self.backup:
self.backup_file = module.backup_local(self.path)
if self.return_content:
self.csr_bytes = result
write_file(module, result)
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
@abc.abstractmethod
def _load_private_key(self):
pass
@abc.abstractmethod
def _check_csr(self):
pass
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CertificateSigningRequestBase, self).check(module, perms_required)
self._load_private_key()
if not state_and_perms:
return False
return self._check_csr()
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(CertificateSigningRequestBase, self).remove(module)
def dump(self):
'''Serialize the object into a dictionary.'''
result = {
'privatekey': self.privatekey_path,
'filename': self.path,
'subject': self.subject,
'subjectAltName': self.subjectAltName,
'keyUsage': self.keyUsage,
'extendedKeyUsage': self.extendedKeyUsage,
'basicConstraints': self.basicConstraints,
'ocspMustStaple': self.ocspMustStaple,
'changed': self.changed,
'name_constraints_permitted': self.name_constraints_permitted,
'name_constraints_excluded': self.name_constraints_excluded,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
if self.csr_bytes is None:
self.csr_bytes = load_file_if_exists(self.path, ignore_errors=True)
result['csr'] = self.csr_bytes.decode('utf-8') if self.csr_bytes else None
return result
class CertificateSigningRequestPyOpenSSL(CertificateSigningRequestBase):
def __init__(self, module):
if module.params['create_subject_key_identifier']:
module.fail_json(msg='You cannot use create_subject_key_identifier with the pyOpenSSL backend!')
for o in ('subject_key_identifier', 'authority_key_identifier', 'authority_cert_issuer', 'authority_cert_serial_number'):
if module.params[o] is not None:
module.fail_json(msg='You cannot use {0} with the pyOpenSSL backend!'.format(o))
super(CertificateSigningRequestPyOpenSSL, self).__init__(module)
def _generate_csr(self):
req = crypto.X509Req()
req.set_version(self.version - 1)
subject = req.get_subject()
for entry in self.subject:
if entry[1] is not None:
# Workaround for https://github.com/pyca/pyopenssl/issues/165
nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0]))
if nid == 0:
raise CertificateSigningRequestError('Unknown subject field identifier "{0}"'.format(entry[0]))
res = OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0)
if res == 0:
raise CertificateSigningRequestError('Invalid value for subject field identifier "{0}": {1}'.format(entry[0], entry[1]))
extensions = []
if self.subjectAltName:
altnames = ', '.join(self.subjectAltName)
try:
extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii')))
except OpenSSL.crypto.Error as e:
raise CertificateSigningRequestError(
'Error while parsing Subject Alternative Names {0} (check for missing type prefix, such as "DNS:"!): {1}'.format(
', '.join(["{0}".format(san) for san in self.subjectAltName]), str(e)
)
)
if self.keyUsage:
usages = ', '.join(self.keyUsage)
extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii')))
if self.extendedKeyUsage:
usages = ', '.join(self.extendedKeyUsage)
extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii')))
if self.basicConstraints:
usages = ', '.join(self.basicConstraints)
extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii')))
if self.name_constraints_permitted or self.name_constraints_excluded:
usages = ', '.join(
['permitted;{0}'.format(name) for name in self.name_constraints_permitted] +
['excluded;{0}'.format(name) for name in self.name_constraints_excluded]
)
extensions.append(crypto.X509Extension(b"nameConstraints", self.name_constraints_critical, usages.encode('ascii')))
if self.ocspMustStaple:
extensions.append(crypto.X509Extension(OPENSSL_MUST_STAPLE_NAME, self.ocspMustStaple_critical, OPENSSL_MUST_STAPLE_VALUE))
if extensions:
req.add_extensions(extensions)
req.set_pubkey(self.privatekey)
req.sign(self.privatekey, self.digest)
self.request = req
return crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request)
def _load_private_key(self):
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase
)
except OpenSSLBadPassphraseError as exc:
raise CertificateSigningRequestError(exc)
def _check_csr(self):
def _check_subject(csr):
subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject]
current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()]
if not set(subject) == set(current_subject):
return False
return True
def _check_subjectAltName(extensions):
altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '')
altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
to_text(altnames_ext, errors='surrogate_or_strict').split(',') if altname.strip()]
if self.subjectAltName:
if (set(altnames) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.subjectAltName]) or
altnames_ext.get_critical() != self.subjectAltName_critical):
return False
else:
if altnames:
return False
return True
def _check_keyUsage_(extensions, extName, expected, critical):
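# Generic extension comparison: map both current and expected values to
# OpenSSL NIDs so that alternative spellings of the same usage compare equal.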
usages_ext = [ext for ext in extensions if ext.get_short_name() == extName]
if (not usages_ext and expected) or (usages_ext and not expected):
return False
elif not usages_ext and not expected:
return True
else:
current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')]
expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected]
return set(current) == set(expected) and usages_ext[0].get_critical() == critical
def _check_keyUsage(extensions):
usages_ext = [ext for ext in extensions if ext.get_short_name() == b'keyUsage']
if (not usages_ext and self.keyUsage) or (usages_ext and not self.keyUsage):
return False
elif not usages_ext and not self.keyUsage:
return True
else:
# OpenSSL._util.lib.OBJ_txt2nid() always returns 0 for all keyUsage values
# (since keyUsage has a fixed bitfield for these values and is not extensible).
# Therefore, we create an extension for the wanted values, and compare the
# data of the extensions (which is the serialized bitfield).
expected_ext = crypto.X509Extension(b"keyUsage", False, ', '.join(self.keyUsage).encode('ascii'))
return usages_ext[0].get_data() == expected_ext.get_data() and usages_ext[0].get_critical() == self.keyUsage_critical
def _check_extendedKeyUsage(extensions):
return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical)
def _check_basicConstraints(extensions):
return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical)
def _check_nameConstraints(extensions):
nc_ext = next((ext for ext in extensions if ext.get_short_name() == b'nameConstraints'), '')
permitted, excluded = pyopenssl_parse_name_constraints(nc_ext)
if self.name_constraints_permitted or self.name_constraints_excluded:
if set(permitted) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_permitted]):
return False
if set(excluded) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_excluded]):
return False
if nc_ext.get_critical() != self.name_constraints_critical:
return False
else:
if permitted or excluded:
return False
return True
def _check_ocspMustStaple(extensions):
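# Match the Must Staple extension by short name and raw value; both differ
# between OpenSSL releases (see the OPENSSL_MUST_STAPLE_* constants above).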
oms_ext = [ext for ext in extensions if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE]
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
# Older versions of libssl don't know about OCSP Must Staple
oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
if self.ocspMustStaple:
return len(oms_ext) > 0 and oms_ext[0].get_critical() == self.ocspMustStaple_critical
else:
return len(oms_ext) == 0
def _check_extensions(csr):
extensions = csr.get_extensions()
return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
_check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions) and
_check_ocspMustStaple(extensions) and _check_nameConstraints(extensions))
def _check_signature(csr):
try:
return csr.verify(self.privatekey)
except crypto.Error:
return False
try:
csr = load_certificate_request(self.path, backend='pyopenssl')
except Exception as dummy:
return False
return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
class CertificateSigningRequestCryptography(CertificateSigningRequestBase):
def __init__(self, module):
super(CertificateSigningRequestCryptography, self).__init__(module)
self.cryptography_backend = cryptography.hazmat.backends.default_backend()
self.module = module
if self.version != 1:
module.warn('The cryptography backend only supports version 1. (The only valid value according to RFC 2986.)')
def _generate_csr(self):
csr = cryptography.x509.CertificateSigningRequestBuilder()
try:
csr = csr.subject_name(cryptography.x509.Name([
cryptography.x509.NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1])) for entry in self.subject
]))
except ValueError as e:
raise CertificateSigningRequestError(e)
if self.subjectAltName:
csr = csr.add_extension(cryptography.x509.SubjectAlternativeName([
cryptography_get_name(name) for name in self.subjectAltName
]), critical=self.subjectAltName_critical)
if self.keyUsage:
params = cryptography_parse_key_usage_params(self.keyUsage)
csr = csr.add_extension(cryptography.x509.KeyUsage(**params), critical=self.keyUsage_critical)
if self.extendedKeyUsage:
usages = [cryptography_name_to_oid(usage) for usage in self.extendedKeyUsage]
csr = csr.add_extension(cryptography.x509.ExtendedKeyUsage(usages), critical=self.extendedKeyUsage_critical)
if self.basicConstraints:
ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
csr = csr.add_extension(cryptography.x509.BasicConstraints(ca, path_length), critical=self.basicConstraints_critical)
if self.ocspMustStaple:
try:
# This only works with cryptography >= 2.1
csr = csr.add_extension(cryptography.x509.TLSFeature([cryptography.x509.TLSFeatureType.status_request]), critical=self.ocspMustStaple_critical)
except AttributeError as dummy:
csr = csr.add_extension(
cryptography.x509.UnrecognizedExtension(CRYPTOGRAPHY_MUST_STAPLE_NAME, CRYPTOGRAPHY_MUST_STAPLE_VALUE),
critical=self.ocspMustStaple_critical
)
if self.name_constraints_permitted or self.name_constraints_excluded:
try:
csr = csr.add_extension(cryptography.x509.NameConstraints(
[cryptography_get_name(name) for name in self.name_constraints_permitted],
[cryptography_get_name(name) for name in self.name_constraints_excluded],
), critical=self.name_constraints_critical)
except TypeError as e:
raise OpenSSLObjectError('Error while parsing name constraint: {0}'.format(e))
if self.create_subject_key_identifier:
csr = csr.add_extension(
cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
critical=False
)
elif self.subject_key_identifier is not None:
csr = csr.add_extension(cryptography.x509.SubjectKeyIdentifier(self.subject_key_identifier), critical=False)
if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
issuers = None
if self.authority_cert_issuer is not None:
issuers = [cryptography_get_name(n) for n in self.authority_cert_issuer]
csr = csr.add_extension(
cryptography.x509.AuthorityKeyIdentifier(self.authority_key_identifier, issuers, self.authority_cert_serial_number),
critical=False
)
digest = None
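# Ed25519 and Ed448 keys bring their own digest; for those, cryptography
# expects the signing algorithm to be None (hence the check below).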
if cryptography_key_needs_digest_for_signing(self.privatekey):
if self.digest == 'sha256':
digest = cryptography.hazmat.primitives.hashes.SHA256()
elif self.digest == 'sha384':
digest = cryptography.hazmat.primitives.hashes.SHA384()
elif self.digest == 'sha512':
digest = cryptography.hazmat.primitives.hashes.SHA512()
elif self.digest == 'sha1':
digest = cryptography.hazmat.primitives.hashes.SHA1()
elif self.digest == 'md5':
digest = cryptography.hazmat.primitives.hashes.MD5()
# FIXME
else:
raise CertificateSigningRequestError('Unsupported digest "{0}"'.format(self.digest))
try:
self.request = csr.sign(self.privatekey, digest, self.cryptography_backend)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and digest is None:
self.module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
except UnicodeError as e:
# This catches IDNAErrors, which happens when a bad name is passed as a SAN
# (https://github.com/ansible-collections/community.crypto/issues/105).
# For older cryptography versions, this is handled by idna, which raises
# an idna.core.IDNAError. Later versions of cryptography deprecated and stopped
# requiring idna, whence we cannot easily handle this error. Fortunately, in
# most versions of idna, IDNAError extends UnicodeError. There is only version
# 2.3 where it extends Exception instead (see
# https://github.com/kjd/idna/commit/ebefacd3134d0f5da4745878620a6a1cba86d130
# and then
# https://github.com/kjd/idna/commit/ea03c7b5db7d2a99af082e0239da2b68aeea702a).
msg = 'Error while creating CSR: {0}\n'.format(e)
if self.using_common_name_for_san:
self.module.fail_json(msg=msg + 'This is probably caused because the Common Name is used as a SAN.'
' Specifying use_common_name_for_san=false might fix this.')
self.module.fail_json(msg=msg + 'This is probably caused by an invalid Subject Alternative DNS Name.')
return self.request.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)
def _load_private_key(self):
try:
if self.privatekey_content is not None:
content = self.privatekey_content
else:
with open(self.privatekey_path, 'rb') as f:
content = f.read()
self.privatekey = cryptography.hazmat.primitives.serialization.load_pem_private_key(
content,
None if self.privatekey_passphrase is None else to_bytes(self.privatekey_passphrase),
backend=self.cryptography_backend
)
except Exception as e:
raise CertificateSigningRequestError(e)
def _check_csr(self):
def _check_subject(csr):
subject = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.subject]
current_subject = [(sub.oid, sub.value) for sub in csr.subject]
return set(subject) == set(current_subject)
def _find_extension(extensions, exttype):
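# Return the first extension whose parsed value is of the given type, or None.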
return next(
(ext for ext in extensions if isinstance(ext.value, exttype)),
None
)
def _check_subjectAltName(extensions):
current_altnames_ext = _find_extension(extensions, cryptography.x509.SubjectAlternativeName)
current_altnames = [str(altname) for altname in current_altnames_ext.value] if current_altnames_ext else []
altnames = [str(cryptography_get_name(altname)) for altname in self.subjectAltName] if self.subjectAltName else []
if set(altnames) != set(current_altnames):
return False
if altnames:
if current_altnames_ext.critical != self.subjectAltName_critical:
return False
return True
def _check_keyUsage(extensions):
current_keyusage_ext = _find_extension(extensions, cryptography.x509.KeyUsage)
if not self.keyUsage:
return current_keyusage_ext is None
elif current_keyusage_ext is None:
return False
params = cryptography_parse_key_usage_params(self.keyUsage)
for param in params:
if getattr(current_keyusage_ext.value, '_' + param) != params[param]:
return False
if current_keyusage_ext.critical != self.keyUsage_critical:
return False
return True
def _check_extendedKeyUsage(extensions):
current_usages_ext = _find_extension(extensions, cryptography.x509.ExtendedKeyUsage)
current_usages = [str(usage) for usage in current_usages_ext.value] if current_usages_ext else []
usages = [str(cryptography_name_to_oid(usage)) for usage in self.extendedKeyUsage] if self.extendedKeyUsage else []
if set(current_usages) != set(usages):
return False
if usages:
if current_usages_ext.critical != self.extendedKeyUsage_critical:
return False
return True
def _check_basicConstraints(extensions):
bc_ext = _find_extension(extensions, cryptography.x509.BasicConstraints)
current_ca = bc_ext.value.ca if bc_ext else False
current_path_length = bc_ext.value.path_length if bc_ext else None
ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
# Check CA flag
if ca != current_ca:
return False
# Check path length
if path_length != current_path_length:
return False
# Check criticality
if self.basicConstraints:
if bc_ext.critical != self.basicConstraints_critical:
return False
return True
def _check_ocspMustStaple(extensions):
try:
# This only works with cryptography >= 2.1
tlsfeature_ext = _find_extension(extensions, cryptography.x509.TLSFeature)
has_tlsfeature = True
except AttributeError as dummy:
tlsfeature_ext = next(
(ext for ext in extensions if ext.value.oid == CRYPTOGRAPHY_MUST_STAPLE_NAME),
None
)
has_tlsfeature = False
if self.ocspMustStaple:
if not tlsfeature_ext or tlsfeature_ext.critical != self.ocspMustStaple_critical:
return False
if has_tlsfeature:
return cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
else:
return tlsfeature_ext.value.value == CRYPTOGRAPHY_MUST_STAPLE_VALUE
else:
return tlsfeature_ext is None
def _check_nameConstraints(extensions):
current_nc_ext = _find_extension(extensions, cryptography.x509.NameConstraints)
current_nc_perm = [str(altname) for altname in current_nc_ext.value.permitted_subtrees] if current_nc_ext else []
current_nc_excl = [str(altname) for altname in current_nc_ext.value.excluded_subtrees] if current_nc_ext else []
nc_perm = [str(cryptography_get_name(altname)) for altname in self.name_constraints_permitted]
nc_excl = [str(cryptography_get_name(altname)) for altname in self.name_constraints_excluded]
if set(nc_perm) != set(current_nc_perm) or set(nc_excl) != set(current_nc_excl):
return False
if nc_perm or nc_excl:
if current_nc_ext.critical != self.name_constraints_critical:
return False
return True
def _check_subject_key_identifier(extensions):
ext = _find_extension(extensions, cryptography.x509.SubjectKeyIdentifier)
if self.create_subject_key_identifier or self.subject_key_identifier is not None:
if not ext or ext.critical:
return False
if self.create_subject_key_identifier:
digest = cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()).digest
return ext.value.digest == digest
else:
return ext.value.digest == self.subject_key_identifier
else:
return ext is None
def _check_authority_key_identifier(extensions):
ext = _find_extension(extensions, cryptography.x509.AuthorityKeyIdentifier)
if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
if not ext or ext.critical:
return False
aci = None
csr_aci = None
if self.authority_cert_issuer is not None:
aci = [str(cryptography_get_name(n)) for n in self.authority_cert_issuer]
if ext.value.authority_cert_issuer is not None:
csr_aci = [str(n) for n in ext.value.authority_cert_issuer]
return (ext.value.key_identifier == self.authority_key_identifier
and csr_aci == aci
and ext.value.authority_cert_serial_number == self.authority_cert_serial_number)
else:
return ext is None
def _check_extensions(csr):
extensions = csr.extensions
return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
_check_extenededKeyUsage(extensions) and _check_basicConstraints(extensions) and
_check_ocspMustStaple(extensions) and _check_subject_key_identifier(extensions) and
_check_authority_key_identifier(extensions) and _check_nameConstraints(extensions))
def _check_signature(csr):
if not csr.is_signature_valid:
return False
# To check whether public key of CSR belongs to private key,
# encode both public keys and compare PEMs.
key_a = csr.public_key().public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.PEM,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
key_b = self.privatekey.public_key().public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.PEM,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
return key_a == key_b
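# Standalone sketch of the same ownership check (hedged; `csr_pem` and
# `key_pem` are hypothetical PEM inputs, and cryptography < 3.1 additionally
# requires a backend argument):
#
#   from cryptography import x509
#   from cryptography.hazmat.primitives import serialization
#   csr = x509.load_pem_x509_csr(csr_pem)
#   key = serialization.load_pem_private_key(key_pem, password=None)
#   spki = (serialization.Encoding.PEM,
#           serialization.PublicFormat.SubjectPublicKeyInfo)
#   belongs = csr.public_key().public_bytes(*spki) == key.public_key().public_bytes(*spki)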
try:
csr = load_certificate_request(self.path, backend='cryptography')
except Exception as dummy:
return False
return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
digest=dict(type='str', default='sha256'),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str', no_log=True),
privatekey_passphrase=dict(type='str', no_log=True),
version=dict(type='int', default=1),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
subject=dict(type='dict'),
country_name=dict(type='str', aliases=['C', 'countryName']),
state_or_province_name=dict(type='str', aliases=['ST', 'stateOrProvinceName']),
locality_name=dict(type='str', aliases=['L', 'localityName']),
organization_name=dict(type='str', aliases=['O', 'organizationName']),
organizational_unit_name=dict(type='str', aliases=['OU', 'organizationalUnitName']),
common_name=dict(type='str', aliases=['CN', 'commonName']),
email_address=dict(type='str', aliases=['E', 'emailAddress']),
subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName']),
subject_alt_name_critical=dict(type='bool', default=False, aliases=['subjectAltName_critical']),
use_common_name_for_san=dict(type='bool', default=True, aliases=['useCommonNameForSAN']),
key_usage=dict(type='list', elements='str', aliases=['keyUsage']),
key_usage_critical=dict(type='bool', default=False, aliases=['keyUsage_critical']),
extended_key_usage=dict(type='list', elements='str', aliases=['extKeyUsage', 'extendedKeyUsage']),
extended_key_usage_critical=dict(type='bool', default=False, aliases=['extKeyUsage_critical', 'extendedKeyUsage_critical']),
basic_constraints=dict(type='list', elements='str', aliases=['basicConstraints']),
basic_constraints_critical=dict(type='bool', default=False, aliases=['basicConstraints_critical']),
ocsp_must_staple=dict(type='bool', default=False, aliases=['ocspMustStaple']),
ocsp_must_staple_critical=dict(type='bool', default=False, aliases=['ocspMustStaple_critical']),
name_constraints_permitted=dict(type='list', elements='str'),
name_constraints_excluded=dict(type='list', elements='str'),
name_constraints_critical=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
create_subject_key_identifier=dict(type='bool', default=False),
subject_key_identifier=dict(type='str'),
authority_key_identifier=dict(type='str'),
authority_cert_issuer=dict(type='list', elements='str'),
authority_cert_serial_number=dict(type='int'),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
return_content=dict(type='bool', default=False),
),
required_together=[('authority_cert_issuer', 'authority_cert_serial_number')],
required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
add_file_common_args=True,
supports_check_mode=True,
)
if module.params['version'] != 1:
module.deprecate('The version option will only support allowed values from community.crypto 2.0.0 on. '
'Currently, only the value 1 is allowed by RFC 2986',
version='2.0.0', collection_name='community.crypto')
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(name=base_dir, msg='The directory %s does not exist or is not a directory' % base_dir)
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# First try cryptography, then pyOpenSSL
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
if not hasattr(crypto.X509Req, 'get_extensions'):
module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
csr = CertificateSigningRequestPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
csr = CertificateSigningRequestCryptography(module)
if module.params['state'] == 'present':
if module.check_mode:
result = csr.dump()
result['changed'] = module.params['force'] or not csr.check(module)
module.exit_json(**result)
csr.generate(module)
else:
if module.check_mode:
result = csr.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
csr.remove(module)
result = csr.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_1 |
crossvul-python_data_good_4295_5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Patrick Pichler <ppichler+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_signature
version_added: 1.1.0
short_description: Sign data with openssl
description:
- This module allows one to sign data using a private key.
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
requirements:
- Either cryptography >= 1.4 (some key types require newer versions)
- Or pyOpenSSL >= 0.11 (Ed25519 and Ed448 keys are not supported with this backend)
author:
- Patrick Pichler (@aveexy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
privatekey_path:
description:
- The path to the private key to use when signing.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
path:
description:
- The file to sign.
- This file will only be read and not modified.
type: path
required: true
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
notes:
- |
When using the C(cryptography) backend, the following key types require at least the following C(cryptography) version:
RSA keys: C(cryptography) >= 1.4
DSA and ECDSA keys: C(cryptography) >= 1.5
ed448 and ed25519 keys: C(cryptography) >= 2.6
seealso:
- module: community.crypto.openssl_signature_info
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Sign example file
community.crypto.openssl_signature:
privatekey_path: private.key
path: /tmp/example_file
register: sig
- name: Verify signature of example file
community.crypto.openssl_signature_info:
certificate_path: cert.pem
path: /tmp/example_file
signature: "{{ sig.signature }}"
register: verify
- name: Make sure the signature is valid
assert:
that:
- verify.valid
'''
RETURN = r'''
signature:
description: Base64 encoded signature.
returned: success
type: str
'''
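# A hedged sketch of verifying the returned signature outside the module with
# the cryptography library, assuming an RSA key and this module's defaults of
# PKCS#1 v1.5 padding with SHA-256 (`pub_pem`, `sig_b64` and `data` are
# hypothetical inputs; cryptography < 3.1 also needs a backend argument):
#
#   import base64
#   from cryptography.hazmat.primitives import hashes, serialization
#   from cryptography.hazmat.primitives.asymmetric import padding
#   pub = serialization.load_pem_public_key(pub_pem)
#   pub.verify(base64.b64decode(sig_b64), data,
#              padding.PKCS1v15(), hashes.SHA256())  # raises InvalidSignature on mismatch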
import os
import traceback
from distutils.version import LooseVersion
import base64
MINIMAL_PYOPENSSL_VERSION = '0.11'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.4'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_DSA_SIGN,
CRYPTOGRAPHY_HAS_EC_SIGN,
CRYPTOGRAPHY_HAS_ED25519_SIGN,
CRYPTOGRAPHY_HAS_ED448_SIGN,
CRYPTOGRAPHY_HAS_RSA_SIGN,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
)
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class SignatureBase(OpenSSLObject):
def __init__(self, module, backend):
super(SignatureBase, self).__init__(
path=module.params['path'],
state='present',
force=False,
check_mode=module.check_mode
)
self.backend = backend
self.module = module  # SignatureCryptography.run() reports errors via self.module.fail_json()
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
def generate(self):
# Empty method because OpenSSLObject wants this
pass
def dump(self):
# Empty method because OpenSSLObject wants this
pass
# Implementation using pyOpenSSL
class SignaturePyOpenSSL(SignatureBase):
def __init__(self, module, backend):
super(SignaturePyOpenSSL, self).__init__(module, backend)
def run(self):
result = dict()
try:
with open(self.path, "rb") as f:
_in = f.read()
private_key = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend,
)
signature = OpenSSL.crypto.sign(private_key, _in, "sha256")
result['signature'] = base64.b64encode(signature)
return result
except Exception as e:
raise OpenSSLObjectError(e)
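# The matching verification primitive in pyOpenSSL is OpenSSL.crypto.verify(),
# which takes an X509 certificate rather than a bare private key (hedged
# sketch; `cert`, `signature` and `data` are assumed to exist):
#
#   OpenSSL.crypto.verify(cert, signature, data, "sha256")  # raises on mismatch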
# Implementation using cryptography
class SignatureCryptography(SignatureBase):
def __init__(self, module, backend):
super(SignatureCryptography, self).__init__(module, backend)
def run(self):
_padding = cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15()
_hash = cryptography.hazmat.primitives.hashes.SHA256()
result = dict()
try:
with open(self.path, "rb") as f:
_in = f.read()
private_key = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend,
)
signature = None
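# Dispatch on the concrete key type: DSA takes only a digest, ECDSA wraps the
# digest in an ECDSA() object, Ed25519/Ed448 take neither padding nor digest,
# and RSA additionally needs a padding scheme.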
if CRYPTOGRAPHY_HAS_DSA_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
signature = private_key.sign(_in, _hash)
if CRYPTOGRAPHY_HAS_EC_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
signature = private_key.sign(_in, cryptography.hazmat.primitives.asymmetric.ec.ECDSA(_hash))
if CRYPTOGRAPHY_HAS_ED25519_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
signature = private_key.sign(_in)
if CRYPTOGRAPHY_HAS_ED448_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
signature = private_key.sign(_in)
if CRYPTOGRAPHY_HAS_RSA_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
signature = private_key.sign(_in, _padding, _hash)
if signature is None:
self.module.fail_json(
msg="Unsupported key type. Your cryptography version is {0}".format(CRYPTOGRAPHY_VERSION)
)
result['signature'] = base64.b64encode(signature)
return result
except Exception as e:
raise OpenSSLObjectError(e)
def main():
module = AnsibleModule(
argument_spec=dict(
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str', no_log=True),
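# no_log=True keeps the raw key material out of Ansible's logs and output.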
privatekey_passphrase=dict(type='str', no_log=True),
path=dict(type='path', required=True),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
),
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
required_one_of=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
)
if not os.path.isfile(module.params['path']):
module.fail_json(
name=module.params['path'],
msg='The file {0} does not exist'.format(module.params['path'])
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# Decision
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
_sign = SignaturePyOpenSSL(module, backend)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
_sign = SignatureCryptography(module, backend)
result = _sign.run()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_5 |
crossvul-python_data_bad_4295_2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey
short_description: Generate OpenSSL private keys
description:
- This module allows one to (re)generate OpenSSL private keys.
- One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29),
L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
- Keys are generated in PEM format.
- "Please note that the module regenerates private keys if they don't match
the module's options. In particular, if you provide another passphrase
(or specify none), change the keysize, etc., the private key will be
regenerated. If you are concerned that this could **overwrite your private key**,
consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
size:
description:
- Size (in bits) of the TLS/SSL key to generate.
type: int
default: 4096
type:
description:
- The algorithm used to generate the TLS/SSL private key.
- Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
I(curve) option.
type: str
default: RSA
choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
curve:
description:
- Note that not all curves are supported by all versions of C(cryptography).
- For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
- We use the curve names as defined in the
L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
type: str
choices:
- secp384r1
- secp521r1
- secp224r1
- secp192r1
- secp256r1
- secp256k1
- brainpoolP256r1
- brainpoolP384r1
- brainpoolP512r1
- sect571k1
- sect409k1
- sect283k1
- sect233k1
- sect163k1
- sect571r1
- sect409r1
- sect283r1
- sect233r1
- sect163r2
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
path:
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
cipher:
description:
- The cipher to encrypt the private key. (Valid values can be found by
running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
depending on your OpenSSL version.)
- When using the C(cryptography) backend, use C(auto).
type: str
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
format:
description:
- Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
is used for all keys which support it. Please note that not every key can be exported in any format.
- The value C(auto) selects a format based on the key type. The value C(auto_ignore) does the same,
but for existing private key files, it will not force regeneration when the file's format is not the
automatically selected one for generation.
- Note that if the format for an existing private key mismatches, the key is *regenerated* by default.
To change this behavior, use the I(format_mismatch) option.
- The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
fail if a value different from C(auto_ignore) is used.
type: str
default: auto_ignore
choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
version_added: '1.0.0'
format_mismatch:
description:
- Determines behavior of the module if the format of a private key does not match the expected format, but all
other parameters are as expected.
- If set to C(regenerate) (default), generates a new private key.
- If set to C(convert), the key will be converted to the new format instead.
- Only supported by the C(cryptography) backend.
type: str
default: regenerate
choices: [ regenerate, convert ]
version_added: '1.0.0'
backup:
description:
- Create a backup file including a timestamp so you can get
the original private key back if you overwrote it with a new one by accident.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) private key's content as I(privatekey).
- Note that especially if the private key is not encrypted, you have to make sure that the returned
value is treated appropriately and not accidentally written to logs etc.! Use with care!
type: bool
default: no
version_added: '1.0.0'
regenerate:
description:
- Allows configuring in which situations the module is allowed to regenerate private keys.
The module will always generate a new key if the destination file does not exist.
- By default, the key will be regenerated when it doesn't match the module's options,
except when the key cannot be read or the passphrase does not match. Please note that
this B(changed) for Ansible 2.10. For Ansible 2.9, the behavior was as if C(full_idempotence)
is specified.
- If set to C(never), the module will fail if the key cannot be read or the passphrase
isn't matching, and will never regenerate an existing key.
- If set to C(fail), the module will fail if the key does not correspond to the module's
options.
- If set to C(partial_idempotence), the key will be regenerated if it does not conform to
the module's options. The key is B(not) regenerated if it cannot be read (broken file),
the key is protected by an unknown passphrase, or when the key is not protected by a
passphrase, but a passphrase is specified.
- If set to C(full_idempotence), the key will be regenerated if it does not conform to the
module's options. This is also the case if the key cannot be read (broken file), the key
is protected by an unknown passphrase, or when the key is not protected by a passphrase,
but a passphrase is specified. Make sure you have a B(backup) when using this option!
- If set to C(always), the module will always regenerate the key. This is equivalent to
setting I(force) to C(yes).
- Note that if I(format_mismatch) is set to C(convert) and everything matches except the
format, the key will always be converted, except if I(regenerate) is set to C(always).
type: str
choices:
- never
- fail
- partial_idempotence
- full_idempotence
- always
default: full_idempotence
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
- name: Generate an OpenSSL private key with a different size (2048 bits)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
- name: Force regenerate an OpenSSL private key if it already exists
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Generate an OpenSSL private key with a different algorithm (DSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = r'''
size:
description: Size (in bits) of the TLS/SSL private key.
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key.
returned: changed or success
type: str
sample: RSA
curve:
description: Elliptic curve used to generate the TLS/SSL private key.
returned: changed or success, and I(type) is C(ECC)
type: str
sample: secp256r1
filename:
description: Path to the generated TLS/SSL private key file.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
- The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/privatekey.pem.2019-03-09@11:22~
privatekey:
description:
- The (current or generated) private key's content.
- Will be Base64-encoded if the key is in raw format.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X25519_FULL,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_private_key_format,
)
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.exceptions
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.dsa
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.utils
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class PrivateKeyError(OpenSSLObjectError):
pass
class PrivateKeyBase(OpenSSLObject):
def __init__(self, module):
super(PrivateKeyBase, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.size = module.params['size']
self.passphrase = module.params['passphrase']
self.cipher = module.params['cipher']
self.privatekey = None
self.fingerprint = {}
self.format = module.params['format']
self.format_mismatch = module.params['format_mismatch']
self.privatekey_bytes = None
self.return_content = module.params['return_content']
self.regenerate = module.params['regenerate']
if self.regenerate == 'always':
self.force = True
self.backup = module.params['backup']
self.backup_file = None
if module.params['mode'] is None:
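# Default to owner-only permissions; a private key file should not be group-
# or world-readable.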
module.params['mode'] = '0600'
@abc.abstractmethod
def _generate_private_key(self):
"""(Re-)Generate private key."""
pass
@abc.abstractmethod
def _ensure_private_key_loaded(self):
"""Make sure that the private key has been loaded."""
pass
@abc.abstractmethod
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
pass
@abc.abstractmethod
def _get_fingerprint(self):
pass
def generate(self, module):
"""Generate a keypair."""
if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
# Regenerate
if self.backup:
self.backup_file = module.backup_local(self.path)
self._generate_private_key()
privatekey_data = self._get_private_key_data()
if self.return_content:
self.privatekey_bytes = privatekey_data
write_file(module, privatekey_data, 0o600)
self.changed = True
elif not self.check(module, perms_required=False, ignore_conversion=False):
# Convert
if self.backup:
self.backup_file = module.backup_local(self.path)
self._ensure_private_key_loaded()
privatekey_data = self._get_private_key_data()
if self.return_content:
self.privatekey_bytes = privatekey_data
write_file(module, privatekey_data, 0o600)
self.changed = True
self.fingerprint = self._get_fingerprint()
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(PrivateKeyBase, self).remove(module)
@abc.abstractmethod
def _check_passphrase(self):
pass
@abc.abstractmethod
def _check_size_and_type(self):
pass
@abc.abstractmethod
def _check_format(self):
pass
def check(self, module, perms_required=True, ignore_conversion=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PrivateKeyBase, self).check(module, perms_required=False)
if not state_and_perms:
# key does not exist
return False
if not self._check_passphrase():
if self.regenerate in ('full_idempotence', 'always'):
return False
module.fail_json(msg='Unable to read the key. The key is protected with another passphrase, has no passphrase, or is broken.'
' Will not proceed. To force regeneration, call the module with `regenerate`'
' set to `full_idempotence` or `always`, or with `force=yes`.')
if self.regenerate != 'never':
if not self._check_size_and_type():
if self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
return False
module.fail_json(msg='Key has wrong type and/or size.'
' Will not proceed. To force regeneration, call the module with `regenerate`'
' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')
if not self._check_format():
# During conversion step, convert if format does not match and format_mismatch == 'convert'
if not ignore_conversion and self.format_mismatch == 'convert':
return False
# During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
if ignore_conversion and self.format_mismatch == 'regenerate' and self.regenerate != 'never':
if not ignore_conversion or self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
return False
module.fail_json(msg='Key has wrong format.'
' Will not proceed. To force regeneration, call the module with `regenerate`'
' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
' To convert the key, set `format_mismatch` to `convert`.')
# check whether permissions are correct (in case that needs to be checked)
return not perms_required or super(PrivateKeyBase, self).check(module, perms_required=perms_required)
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'size': self.size,
'filename': self.path,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
if self.privatekey_bytes is None:
self.privatekey_bytes = load_file_if_exists(self.path, ignore_errors=True)
if self.privatekey_bytes:
if identify_private_key_format(self.privatekey_bytes) == 'raw':
result['privatekey'] = base64.b64encode(self.privatekey_bytes)
else:
result['privatekey'] = self.privatekey_bytes.decode('utf-8')
else:
result['privatekey'] = None
return result
# Implementation using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):
def __init__(self, module):
super(PrivateKeyPyOpenSSL, self).__init__(module)
if module.params['type'] == 'RSA':
self.type = crypto.TYPE_RSA
elif module.params['type'] == 'DSA':
self.type = crypto.TYPE_DSA
else:
module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
if self.format != 'auto_ignore':
module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")
def _generate_private_key(self):
"""(Re-)Generate private key."""
self.privatekey = crypto.PKey()
try:
self.privatekey.generate_key(self.type, self.size)
except (TypeError, ValueError) as exc:
raise PrivateKeyError(exc)
def _ensure_private_key_loaded(self):
"""Make sure that the private key has been loaded."""
if self.privatekey is None:
try:
self.privatekey = load_privatekey(self.path, self.passphrase)
except OpenSSLBadPassphraseError as exc:
raise PrivateKeyError(exc)
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
if self.cipher and self.passphrase:
return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
self.cipher, to_bytes(self.passphrase))
else:
return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)
def _get_fingerprint(self):
return get_fingerprint(self.path, self.passphrase)
def _check_passphrase(self):
try:
load_privatekey(self.path, self.passphrase)
return True
except Exception as dummy:
return False
def _check_size_and_type(self):
def _check_size(privatekey):
return self.size == privatekey.bits()
def _check_type(privatekey):
return self.type == privatekey.type()
self._ensure_private_key_loaded()
return _check_size(self.privatekey) and _check_type(self.privatekey)
def _check_format(self):
# Not supported by this backend
return True
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyPyOpenSSL, self).dump()
if self.type == crypto.TYPE_RSA:
result['type'] = 'RSA'
else:
result['type'] = 'DSA'
return result
# Implementation using cryptography
class PrivateKeyCryptography(PrivateKeyBase):
def _get_ec_class(self, ectype):
ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
if ecclass is None:
self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
return ecclass
def _add_curve(self, name, ectype, deprecated=False):
def create(size):
ecclass = self._get_ec_class(ectype)
return ecclass()
def verify(privatekey):
ecclass = self._get_ec_class(ectype)
return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
self.curves[name] = {
'create': create,
'verify': verify,
'deprecated': deprecated,
}
def __init__(self, module):
super(PrivateKeyCryptography, self).__init__(module)
self.curves = dict()
self._add_curve('secp384r1', 'SECP384R1')
self._add_curve('secp521r1', 'SECP521R1')
self._add_curve('secp224r1', 'SECP224R1')
self._add_curve('secp192r1', 'SECP192R1')
self._add_curve('secp256r1', 'SECP256R1')
self._add_curve('secp256k1', 'SECP256K1')
self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
self.module = module
self.cryptography_backend = cryptography.hazmat.backends.default_backend()
self.type = module.params['type']
self.curve = module.params['curve']
if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519')
if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.module.fail_json(msg='Your cryptography version does not support X448')
if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.module.fail_json(msg='Your cryptography version does not support Ed25519')
if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.module.fail_json(msg='Your cryptography version does not support Ed448')
def _get_wanted_format(self):
if self.format not in ('auto', 'auto_ignore'):
return self.format
if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
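# cryptography cannot serialize these key types in the traditional
# (PKCS#1-style) OpenSSL format, so PKCS#8 is the only PEM choice for them.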
return 'pkcs8'
else:
return 'pkcs1'
def _generate_private_key(self):
"""(Re-)Generate private key."""
try:
if self.type == 'RSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537, # OpenSSL always uses this
key_size=self.size,
backend=self.cryptography_backend
)
if self.type == 'DSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
key_size=self.size,
backend=self.cryptography_backend
)
if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
if self.type == 'ECC' and self.curve in self.curves:
if self.curves[self.curve]['deprecated']:
self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
curve=self.curves[self.curve]['create'](self.size),
backend=self.cryptography_backend
)
except cryptography.exceptions.UnsupportedAlgorithm as dummy:
self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
def _ensure_private_key_loaded(self):
"""Make sure that the private key has been loaded."""
if self.privatekey is None:
self.privatekey = self._load_privatekey()
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
# Select export format and encoding
try:
export_format = self._get_wanted_format()
export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
if export_format == 'pkcs1':
# "TraditionalOpenSSL" format is PKCS1
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
elif export_format == 'pkcs8':
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
elif export_format == 'raw':
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
except AttributeError:
self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
# Select key encryption
encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
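# cryptography chooses the encryption scheme itself (BestAvailableEncryption),
# so a concrete cipher name cannot be honored by this backend; only 'auto' is
# accepted together with a passphrase.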
if self.cipher and self.passphrase:
if self.cipher == 'auto':
encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
else:
self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
# Serialize key
try:
return self.privatekey.private_bytes(
encoding=export_encoding,
format=export_format,
encryption_algorithm=encryption_algorithm
)
except ValueError as dummy:
self.module.fail_json(
msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
)
except Exception as dummy:
self.module.fail_json(
msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
exception=traceback.format_exc()
)
def _load_privatekey(self):
try:
# Read bytes
with open(self.path, 'rb') as f:
data = f.read()
# Interpret bytes depending on format.
format = identify_private_key_format(data)
if format == 'raw':
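# Raw keys carry no metadata, so the type is inferred from the length:
# X448 private keys are 56 bytes, Ed448 keys are 57 bytes, and X25519 and
# Ed25519 keys are both 32 bytes (hence the extra disambiguation below).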
if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
if len(data) == 32:
if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
try:
return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
except Exception:
return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
raise PrivateKeyError('Cannot load raw key')
else:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data,
None if self.passphrase is None else to_bytes(self.passphrase),
backend=self.cryptography_backend
)
except Exception as e:
raise PrivateKeyError(e)
def _get_fingerprint(self):
# Get bytes of public key
private_key = self._load_privatekey()
public_key = private_key.public_key()
public_key_bytes = public_key.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.DER,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
# Get fingerprints of public_key_bytes
return get_fingerprint_of_bytes(public_key_bytes)
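# Hedged sketch of what one entry of that fingerprint dict looks like when
# computed by hand (`public_key_bytes` is the DER-encoded SubjectPublicKeyInfo
# from above):
#
#   import hashlib
#   digest = hashlib.sha256(public_key_bytes).hexdigest()
#   fingerprint = ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))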
def _check_passphrase(self):
try:
with open(self.path, 'rb') as f:
data = f.read()
format = identify_private_key_format(data)
if format == 'raw':
# Raw keys cannot be encrypted. To avoid incompatibilities, we try to
# actually load the key (and return False when this fails).
self._load_privatekey()
# Loading the key succeeded. Only return True when no passphrase was
# provided.
return self.passphrase is None
else:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data,
None if self.passphrase is None else to_bytes(self.passphrase),
backend=self.cryptography_backend
)
except Exception as dummy:
return False
def _check_size_and_type(self):
self._ensure_private_key_loaded()
if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
return self.type == 'RSA' and self.size == self.privatekey.key_size
if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
return self.type == 'DSA' and self.size == self.privatekey.key_size
if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
return self.type == 'X25519'
if CRYPTOGRAPHY_HAS_X448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
return self.type == 'X448'
if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
return self.type == 'Ed25519'
if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
return self.type == 'Ed448'
if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
if self.type != 'ECC':
return False
if self.curve not in self.curves:
return False
return self.curves[self.curve]['verify'](self.privatekey)
return False
def _check_format(self):
if self.format == 'auto_ignore':
return True
try:
with open(self.path, 'rb') as f:
content = f.read()
format = identify_private_key_format(content)
return format == self._get_wanted_format()
except Exception as dummy:
return False
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyCryptography, self).dump()
result['type'] = self.type
if self.type == 'ECC':
result['curve'] = self.curve
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
size=dict(type='int', default=4096),
type=dict(type='str', default='RSA', choices=[
'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
]),
curve=dict(type='str', choices=[
'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
]),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
passphrase=dict(type='str', no_log=True),
cipher=dict(type='str'),
backup=dict(type='bool', default=False),
format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
return_content=dict(type='bool', default=False),
regenerate=dict(
type='str',
default='full_idempotence',
choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
),
),
supports_check_mode=True,
add_file_common_args=True,
required_together=[
['cipher', 'passphrase']
],
required_if=[
['type', 'ECC', ['curve']],
],
)
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg='The directory %s does not exist or is not a directory' % base_dir
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# Decision
if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
# First try pyOpenSSL, then cryptography
if can_use_pyopenssl:
backend = 'pyopenssl'
elif can_use_cryptography:
backend = 'cryptography'
else:
# First try cryptography, then pyOpenSSL
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
private_key = PrivateKeyPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
private_key = PrivateKeyCryptography(module)
if private_key.state == 'present':
if module.check_mode:
result = private_key.dump()
result['changed'] = private_key.force \
or not private_key.check(module, ignore_conversion=True) \
or not private_key.check(module, ignore_conversion=False)
module.exit_json(**result)
private_key.generate(module)
else:
if module.check_mode:
result = private_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
private_key.remove(module)
result = private_key.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/bad_4295_2 |
crossvul-python_data_bad_4295_5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Patrick Pichler <ppichler+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_signature
version_added: 1.1.0
short_description: Sign data with openssl
description:
- This module allows one to sign data using a private key.
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
requirements:
- Either cryptography >= 1.4 (some key types require newer versions)
- Or pyOpenSSL >= 0.11 (Ed25519 and Ed448 keys are not supported with this backend)
author:
- Patrick Pichler (@aveexy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
privatekey_path:
description:
- The path to the private key to use when signing.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
path:
description:
- The file to sign.
- This file will only be read and not modified.
type: path
required: true
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
notes:
- |
When using the C(cryptography) backend, the following key types require at least the following C(cryptography) version:
RSA keys: C(cryptography) >= 1.4
DSA and ECDSA keys: C(cryptography) >= 1.5
ed448 and ed25519 keys: C(cryptography) >= 2.6
seealso:
- module: community.crypto.openssl_signature_info
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Sign example file
community.crypto.openssl_signature:
privatekey_path: private.key
path: /tmp/example_file
register: sig
- name: Verify signature of example file
community.crypto.openssl_signature_info:
certificate_path: cert.pem
path: /tmp/example_file
signature: "{{ sig.signature }}"
register: verify
- name: Make sure the signature is valid
assert:
that:
- verify.valid
'''
RETURN = r'''
signature:
description: Base64 encoded signature.
returned: success
type: str
'''
import os
import traceback
from distutils.version import LooseVersion
import base64
MINIMAL_PYOPENSSL_VERSION = '0.11'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.4'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_DSA_SIGN,
CRYPTOGRAPHY_HAS_EC_SIGN,
CRYPTOGRAPHY_HAS_ED25519_SIGN,
CRYPTOGRAPHY_HAS_ED448_SIGN,
CRYPTOGRAPHY_HAS_RSA_SIGN,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
)
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class SignatureBase(OpenSSLObject):
def __init__(self, module, backend):
super(SignatureBase, self).__init__(
path=module.params['path'],
state='present',
force=False,
check_mode=module.check_mode
)
self.backend = backend
self.module = module  # SignatureCryptography.run() reports errors via self.module.fail_json()
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
def generate(self):
# Empty method because OpenSSLObject wants this
pass
def dump(self):
# Empty method because OpenSSLObject wants this
pass
# Implementation using pyOpenSSL
class SignaturePyOpenSSL(SignatureBase):
def __init__(self, module, backend):
super(SignaturePyOpenSSL, self).__init__(module, backend)
def run(self):
result = dict()
try:
with open(self.path, "rb") as f:
_in = f.read()
private_key = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend,
)
signature = OpenSSL.crypto.sign(private_key, _in, "sha256")
result['signature'] = base64.b64encode(signature)
return result
except Exception as e:
raise OpenSSLObjectError(e)
# Implementation using cryptography
class SignatureCryptography(SignatureBase):
def __init__(self, module, backend):
super(SignatureCryptography, self).__init__(module, backend)
def run(self):
_padding = cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15()
_hash = cryptography.hazmat.primitives.hashes.SHA256()
result = dict()
try:
with open(self.path, "rb") as f:
_in = f.read()
private_key = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend,
)
signature = None
if CRYPTOGRAPHY_HAS_DSA_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
signature = private_key.sign(_in, _hash)
if CRYPTOGRAPHY_HAS_EC_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
signature = private_key.sign(_in, cryptography.hazmat.primitives.asymmetric.ec.ECDSA(_hash))
if CRYPTOGRAPHY_HAS_ED25519_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
signature = private_key.sign(_in)
if CRYPTOGRAPHY_HAS_ED448_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
signature = private_key.sign(_in)
if CRYPTOGRAPHY_HAS_RSA_SIGN:
if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
signature = private_key.sign(_in, _padding, _hash)
if signature is None:
self.module.fail_json(
msg="Unsupported key type. Your cryptography version is {0}".format(CRYPTOGRAPHY_VERSION)
)
result['signature'] = base64.b64encode(signature)
return result
except Exception as e:
raise OpenSSLObjectError(e)
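# Illustrative sketch (not part of the module): a consumer could verify the
# base64-encoded signature returned for an RSA key roughly like this, using a
# recent cryptography release. The function and variable names are hypothetical.
#
#     import base64
#     from cryptography.exceptions import InvalidSignature
#     from cryptography.hazmat.primitives import hashes, serialization
#     from cryptography.hazmat.primitives.asymmetric import padding
#
#     def verify_rsa_signature(public_key_pem, data, b64_signature):
#         public_key = serialization.load_pem_public_key(public_key_pem)
#         try:
#             public_key.verify(base64.b64decode(b64_signature), data,
#                               padding.PKCS1v15(), hashes.SHA256())
#             return True
#         except InvalidSignature:
#             return False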
def main():
module = AnsibleModule(
argument_spec=dict(
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
privatekey_passphrase=dict(type='str', no_log=True),
path=dict(type='path', required=True),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
),
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
required_one_of=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
)
if not os.path.isfile(module.params['path']):
module.fail_json(
name=module.params['path'],
msg='The file {0} does not exist'.format(module.params['path'])
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
        # Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# Decision
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
        # Fail if no backend has been found
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
_sign = SignaturePyOpenSSL(module, backend)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
_sign = SignatureCryptography(module, backend)
result = _sign.run()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/bad_4295_5 |
crossvul-python_data_good_4295_3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey_info
short_description: Provide information for OpenSSL private keys
description:
- This module allows one to query information on OpenSSL private keys.
- In case the key consistency checks fail, the module will fail as this indicates a faked
private key. In this case, all return variables are still returned. Note that key consistency
checks are not available all key types; if none is available, C(none) is returned for
C(key_is_consistent).
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
C(select_crypto_backend)). Please note that the PyOpenSSL backend was deprecated in Ansible 2.9
and will be removed in community.crypto 2.0.0.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.2.3
author:
- Felix Fontein (@felixfontein)
- Yanis Guenane (@Spredzy)
options:
path:
description:
- Remote absolute path where the private key file is loaded from.
type: path
content:
description:
- Content of the private key file.
- Either I(path) or I(content) must be specified, but not both.
type: str
version_added: '1.0.0'
passphrase:
description:
- The passphrase for the private key.
type: str
return_private_key_data:
description:
- Whether to return private key data.
- Only set this to C(yes) when you want private information about this key to
leave the remote machine.
- "WARNING: you have to make sure that private key data isn't accidentally logged!"
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
seealso:
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Get information on generated key
community.crypto.openssl_privatekey_info:
path: /etc/ssl/private/ansible.com.pem
register: result
- name: Dump information
debug:
var: result
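# An additional, illustrative example (not from the original module docs):
# querying a key passed inline via I(content); the path used in the lookup is a
# placeholder.
- name: Get information on a key passed as content
  community.crypto.openssl_privatekey_info:
    content: "{{ lookup('file', '/etc/ssl/private/ansible.com.pem') }}"
  register: result_inline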
'''
RETURN = r'''
can_load_key:
description: Whether the module was able to load the private key from disk
returned: always
type: bool
can_parse_key:
description: Whether the module was able to parse the private key
returned: always
type: bool
key_is_consistent:
description:
- Whether the key is consistent. Can also return C(none) next to C(yes) and
C(no), to indicate that consistency couldn't be checked.
- In case the check returns C(no), the module will fail.
returned: always
type: bool
public_key:
description: Private key's public key in PEM format
returned: success
type: str
sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
public_key_fingerprints:
description:
- Fingerprints of private key's public key.
- For every hash algorithm available, the fingerprint is computed.
returned: success
type: dict
sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
type:
description:
- The key's type.
- One of C(RSA), C(DSA), C(ECC), C(Ed25519), C(X25519), C(Ed448), or C(X448).
- Will start with C(unknown) if the key type cannot be determined.
returned: success
type: str
sample: RSA
public_data:
description:
- Public key data. Depends on key type.
returned: success
type: dict
private_data:
description:
- Private key data. Depends on key type.
returned: success and when I(return_private_key_data) is set to C(yes)
type: dict
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.math import (
binary_exp_mod,
quick_is_not_prime,
)
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_PYOPENSSL_VERSION = '0.15'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography.hazmat.primitives import serialization
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
SIGNATURE_TEST_DATA = b'1234'
def _get_cryptography_key_info(key):
key_public_data = dict()
key_private_data = dict()
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
key_type = 'RSA'
key_public_data['size'] = key.key_size
key_public_data['modulus'] = key.public_key().public_numbers().n
key_public_data['exponent'] = key.public_key().public_numbers().e
key_private_data['p'] = key.private_numbers().p
key_private_data['q'] = key.private_numbers().q
key_private_data['exponent'] = key.private_numbers().d
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
key_type = 'DSA'
key_public_data['size'] = key.key_size
key_public_data['p'] = key.parameters().parameter_numbers().p
key_public_data['q'] = key.parameters().parameter_numbers().q
key_public_data['g'] = key.parameters().parameter_numbers().g
key_public_data['y'] = key.public_key().public_numbers().y
key_private_data['x'] = key.private_numbers().x
elif CRYPTOGRAPHY_HAS_X25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
key_type = 'X25519'
elif CRYPTOGRAPHY_HAS_X448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
key_type = 'X448'
elif CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
key_type = 'Ed25519'
elif CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
key_type = 'Ed448'
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
key_type = 'ECC'
key_public_data['curve'] = key.public_key().curve.name
key_public_data['x'] = key.public_key().public_numbers().x
key_public_data['y'] = key.public_key().public_numbers().y
key_public_data['exponent_size'] = key.public_key().curve.key_size
key_private_data['multiplier'] = key.private_numbers().private_value
else:
key_type = 'unknown ({0})'.format(type(key))
return key_type, key_public_data, key_private_data
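# The DSA consistency check below verifies the standard parameter relations:
# q must divide p - 1, the generator g must have order q (g**q mod p == 1),
# and the public value must match the private one (g**x mod p == y). A quick
# primality screen on p and q rejects obviously malformed parameters.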
def _check_dsa_consistency(key_public_data, key_private_data):
# Get parameters
p = key_public_data.get('p')
q = key_public_data.get('q')
g = key_public_data.get('g')
y = key_public_data.get('y')
x = key_private_data.get('x')
for v in (p, q, g, y, x):
if v is None:
return None
# Make sure that g is not 0, 1 or -1 in Z/pZ
if g < 2 or g >= p - 1:
return False
# Make sure that x is in range
if x < 1 or x >= q:
return False
# Check whether q divides p-1
if (p - 1) % q != 0:
return False
# Check that g**q mod p == 1
if binary_exp_mod(g, q, p) != 1:
return False
# Check whether g**x mod p == y
if binary_exp_mod(g, x, p) != y:
return False
# Check (quickly) whether p or q are not primes
if quick_is_not_prime(q) or quick_is_not_prime(p):
return False
return True
def _is_cryptography_key_consistent(key, key_public_data, key_private_data):
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
return bool(key._backend._lib.RSA_check_key(key._rsa_cdata))
if isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
result = _check_dsa_consistency(key_public_data, key_private_data)
if result is not None:
return result
try:
signature = key.sign(SIGNATURE_TEST_DATA, cryptography.hazmat.primitives.hashes.SHA256())
except AttributeError:
# sign() was added in cryptography 1.5, but we support older versions
return None
try:
key.public_key().verify(
signature,
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.hashes.SHA256()
)
return True
except cryptography.exceptions.InvalidSignature:
return False
if isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
try:
signature = key.sign(
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
)
except AttributeError:
# sign() was added in cryptography 1.5, but we support older versions
return None
try:
key.public_key().verify(
signature,
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
)
return True
except cryptography.exceptions.InvalidSignature:
return False
has_simple_sign_function = False
if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
has_simple_sign_function = True
if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
has_simple_sign_function = True
if has_simple_sign_function:
signature = key.sign(SIGNATURE_TEST_DATA)
try:
key.public_key().verify(signature, SIGNATURE_TEST_DATA)
return True
except cryptography.exceptions.InvalidSignature:
return False
# For X25519 and X448, there's no test yet.
return None
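# The sign/verify round trips above act as a generic consistency test: a faked
# key whose private half does not match its public half produces signatures
# that fail verification against its own public key.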
class PrivateKeyInfo(OpenSSLObject):
def __init__(self, module, backend):
super(PrivateKeyInfo, self).__init__(
module.params['path'] or '',
'present',
False,
module.check_mode,
)
self.backend = backend
self.module = module
self.content = module.params['content']
self.passphrase = module.params['passphrase']
self.return_private_key_data = module.params['return_private_key_data']
def generate(self):
# Empty method because OpenSSLObject wants this
pass
def dump(self):
# Empty method because OpenSSLObject wants this
pass
@abc.abstractmethod
def _get_public_key(self, binary):
pass
@abc.abstractmethod
def _get_key_info(self):
pass
@abc.abstractmethod
def _is_key_consistent(self, key_public_data, key_private_data):
pass
def get_info(self):
result = dict(
can_load_key=False,
can_parse_key=False,
key_is_consistent=None,
)
if self.content is not None:
priv_key_detail = self.content.encode('utf-8')
result['can_load_key'] = True
else:
try:
with open(self.path, 'rb') as b_priv_key_fh:
priv_key_detail = b_priv_key_fh.read()
result['can_load_key'] = True
except (IOError, OSError) as exc:
self.module.fail_json(msg=to_native(exc), **result)
try:
self.key = load_privatekey(
path=None,
content=priv_key_detail,
passphrase=to_bytes(self.passphrase) if self.passphrase is not None else self.passphrase,
backend=self.backend
)
result['can_parse_key'] = True
except OpenSSLObjectError as exc:
self.module.fail_json(msg=to_native(exc), **result)
result['public_key'] = self._get_public_key(binary=False)
pk = self._get_public_key(binary=True)
result['public_key_fingerprints'] = get_fingerprint_of_bytes(pk) if pk is not None else dict()
key_type, key_public_data, key_private_data = self._get_key_info()
result['type'] = key_type
result['public_data'] = key_public_data
if self.return_private_key_data:
result['private_data'] = key_private_data
result['key_is_consistent'] = self._is_key_consistent(key_public_data, key_private_data)
if result['key_is_consistent'] is False:
            # Only fail when the check returned False; None means "we don't know"
            # and must not cause a failure.
self.module.fail_json(
msg="Private key is not consistent! (See "
"https://blog.hboeck.de/archives/888-How-I-tricked-Symantec-with-a-Fake-Private-Key.html)",
**result
)
return result
class PrivateKeyInfoCryptography(PrivateKeyInfo):
"""Validate the supplied private key, using the cryptography backend"""
def __init__(self, module):
super(PrivateKeyInfoCryptography, self).__init__(module, 'cryptography')
def _get_public_key(self, binary):
return self.key.public_key().public_bytes(
serialization.Encoding.DER if binary else serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def _get_key_info(self):
return _get_cryptography_key_info(self.key)
def _is_key_consistent(self, key_public_data, key_private_data):
return _is_cryptography_key_consistent(self.key, key_public_data, key_private_data)
class PrivateKeyInfoPyOpenSSL(PrivateKeyInfo):
"""validate the supplied private key."""
def __init__(self, module):
super(PrivateKeyInfoPyOpenSSL, self).__init__(module, 'pyopenssl')
def _get_public_key(self, binary):
try:
return crypto.dump_publickey(
crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
self.key
)
except AttributeError:
try:
# pyOpenSSL < 16.0:
bio = crypto._new_mem_buf()
if binary:
rc = crypto._lib.i2d_PUBKEY_bio(bio, self.key._pkey)
else:
rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.key._pkey)
if rc != 1:
crypto._raise_current_error()
return crypto._bio_to_string(bio)
except AttributeError:
self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
'Please upgrade to version 16.0 or newer, or use the cryptography backend.')
def bigint_to_int(self, bn):
'''Convert OpenSSL BIGINT to Python integer'''
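        # For example, a BIGNUM holding 255 comes back from BN_bn2hex() as
        # b"FF", and int(b"FF", 16) == 255; the returned hex buffer must then
        # be released with OPENSSL_free().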
if bn == OpenSSL._util.ffi.NULL:
return None
hexstr = OpenSSL._util.lib.BN_bn2hex(bn)
try:
return int(OpenSSL._util.ffi.string(hexstr), 16)
finally:
OpenSSL._util.lib.OPENSSL_free(hexstr)
def _get_key_info(self):
key_public_data = dict()
key_private_data = dict()
openssl_key_type = self.key.type()
try_fallback = True
if crypto.TYPE_RSA == openssl_key_type:
key_type = 'RSA'
key_public_data['size'] = self.key.bits()
try:
# Use OpenSSL directly to extract key data
key = OpenSSL._util.lib.EVP_PKEY_get1_RSA(self.key._pkey)
key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.RSA_free)
# OpenSSL 1.1 and newer have functions to extract the parameters
# from the EVP PKEY data structures. Older versions didn't have
# these getters, and it was common use to simply access the values
# directly. Since there's no guarantee that these data structures
# will still be accessible in the future, we use the getters for
# 1.1 and later, and directly access the values for 1.0.x and
# earlier.
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# Get modulus and exponents
n = OpenSSL._util.ffi.new("BIGNUM **")
e = OpenSSL._util.ffi.new("BIGNUM **")
d = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.RSA_get0_key(key, n, e, d)
key_public_data['modulus'] = self.bigint_to_int(n[0])
key_public_data['exponent'] = self.bigint_to_int(e[0])
key_private_data['exponent'] = self.bigint_to_int(d[0])
# Get factors
p = OpenSSL._util.ffi.new("BIGNUM **")
q = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.RSA_get0_factors(key, p, q)
key_private_data['p'] = self.bigint_to_int(p[0])
key_private_data['q'] = self.bigint_to_int(q[0])
else:
# Get modulus and exponents
key_public_data['modulus'] = self.bigint_to_int(key.n)
key_public_data['exponent'] = self.bigint_to_int(key.e)
key_private_data['exponent'] = self.bigint_to_int(key.d)
# Get factors
key_private_data['p'] = self.bigint_to_int(key.p)
key_private_data['q'] = self.bigint_to_int(key.q)
try_fallback = False
except AttributeError:
# Use fallback if available
pass
elif crypto.TYPE_DSA == openssl_key_type:
key_type = 'DSA'
key_public_data['size'] = self.key.bits()
try:
# Use OpenSSL directly to extract key data
key = OpenSSL._util.lib.EVP_PKEY_get1_DSA(self.key._pkey)
key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.DSA_free)
# OpenSSL 1.1 and newer have functions to extract the parameters
# from the EVP PKEY data structures. Older versions didn't have
# these getters, and it was common use to simply access the values
# directly. Since there's no guarantee that these data structures
# will still be accessible in the future, we use the getters for
# 1.1 and later, and directly access the values for 1.0.x and
# earlier.
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# Get public parameters (primes and group element)
p = OpenSSL._util.ffi.new("BIGNUM **")
q = OpenSSL._util.ffi.new("BIGNUM **")
g = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.DSA_get0_pqg(key, p, q, g)
key_public_data['p'] = self.bigint_to_int(p[0])
key_public_data['q'] = self.bigint_to_int(q[0])
key_public_data['g'] = self.bigint_to_int(g[0])
# Get public and private key exponents
y = OpenSSL._util.ffi.new("BIGNUM **")
x = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.DSA_get0_key(key, y, x)
key_public_data['y'] = self.bigint_to_int(y[0])
key_private_data['x'] = self.bigint_to_int(x[0])
else:
# Get public parameters (primes and group element)
key_public_data['p'] = self.bigint_to_int(key.p)
key_public_data['q'] = self.bigint_to_int(key.q)
key_public_data['g'] = self.bigint_to_int(key.g)
# Get public and private key exponents
key_public_data['y'] = self.bigint_to_int(key.pub_key)
key_private_data['x'] = self.bigint_to_int(key.priv_key)
try_fallback = False
except AttributeError:
# Use fallback if available
pass
else:
# Return 'unknown'
key_type = 'unknown ({0})'.format(self.key.type())
# If needed and if possible, fall back to cryptography
if try_fallback and PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
return _get_cryptography_key_info(self.key.to_cryptography_key())
return key_type, key_public_data, key_private_data
def _is_key_consistent(self, key_public_data, key_private_data):
openssl_key_type = self.key.type()
if crypto.TYPE_RSA == openssl_key_type:
try:
return self.key.check()
except crypto.Error:
# OpenSSL error means that key is not consistent
return False
if crypto.TYPE_DSA == openssl_key_type:
result = _check_dsa_consistency(key_public_data, key_private_data)
if result is not None:
return result
signature = crypto.sign(self.key, SIGNATURE_TEST_DATA, 'sha256')
# Verify wants a cert (where it can get the public key from)
cert = crypto.X509()
cert.set_pubkey(self.key)
try:
crypto.verify(cert, signature, SIGNATURE_TEST_DATA, 'sha256')
return True
except crypto.Error:
return False
# If needed and if possible, fall back to cryptography
if PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
return _is_cryptography_key_consistent(self.key.to_cryptography_key(), key_public_data, key_private_data)
return None
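# Both _get_key_info() and _is_key_consistent() above fall back to the
# cryptography backend via to_cryptography_key() (pyOpenSSL >= 16.1.0) when the
# direct FFI route is unavailable, so such keys are still handled.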
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path'),
content=dict(type='str', no_log=True),
passphrase=dict(type='str', no_log=True),
return_private_key_data=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
),
required_one_of=(
['path', 'content'],
),
mutually_exclusive=(
['path', 'content'],
),
supports_check_mode=True,
)
try:
if module.params['path'] is not None:
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
                    msg='The directory %s does not exist or is not a directory' % base_dir
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what backend we can use
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# If cryptography is available we'll use it
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Fail if no backend has been found
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
privatekey = PrivateKeyInfoPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
privatekey = PrivateKeyInfoCryptography(module)
result = privatekey.get_info()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_3 |
crossvul-python_data_good_4295_7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_crl
version_added: '1.0.0'
short_description: Generate Certificate Revocation Lists (CRLs)
description:
- This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
- Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
or as a path to a certificate file in PEM format.
requirements:
- cryptography >= 1.2
author:
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the CRL file should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
mode:
description:
- Defines how to process entries of existing CRLs.
- If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
as specified in I(revoked_certificates).
- If set to C(update), makes sure that the CRL contains the revoked certificates from
I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
already exists, all entries from the existing CRL will also be included in the new CRL.
When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
type: str
default: generate
choices: [ generate, update ]
force:
description:
- Should the CRL be forced to be regenerated.
type: bool
default: no
backup:
description:
- Create a backup file including a timestamp so you can get the original
CRL back if you overwrote it with a new one by accident.
type: bool
default: no
path:
description:
- Remote absolute path where the generated CRL file should be created or is already located.
type: path
required: yes
format:
description:
- Whether the CRL file should be in PEM or DER format.
- If an existing CRL file does match everything but I(format), it will be converted to the correct format
instead of regenerated.
type: str
choices: [pem, der]
default: pem
privatekey_path:
description:
- Path to the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path).
- This is required if the private key is password protected.
type: str
issuer:
description:
- Key/value pairs that will be present in the issuer name field of the CRL.
- If you need to specify more than one value with the same key, use a list as value.
- Required if I(state) is C(present).
type: dict
last_update:
description:
- The point in time from which this CRL can be trusted.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
        + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
next_update:
description:
- "The absolute latest point in time by which this I(issuer) is expected to have issued
another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
        + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
- Required if I(state) is C(present).
type: str
digest:
description:
- Digest algorithm to be used when signing the CRL.
type: str
default: sha256
revoked_certificates:
description:
- List of certificates to be revoked.
- Required if I(state) is C(present).
type: list
elements: dict
suboptions:
path:
description:
- Path to a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(content) and I(serial_number). One of these three options
must be specified.
type: path
content:
description:
- Content of a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(path) and I(serial_number). One of these three options
must be specified.
type: str
serial_number:
description:
- Serial number of the certificate.
- Mutually exclusive with I(path) and I(content). One of these three options must
be specified.
type: int
revocation_date:
description:
- The point in time the certificate was revoked.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
            + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
issuer:
description:
- The certificate's issuer.
- "Example: C(DNS:ca.example.org)"
type: list
elements: str
issuer_critical:
description:
- Whether the certificate issuer extension should be critical.
type: bool
default: no
reason:
description:
- The value for the revocation reason extension.
type: str
choices:
- unspecified
- key_compromise
- ca_compromise
- affiliation_changed
- superseded
- cessation_of_operation
- certificate_hold
- privilege_withdrawn
- aa_compromise
- remove_from_crl
reason_critical:
description:
- Whether the revocation reason extension should be critical.
type: bool
default: no
invalidity_date:
description:
- The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
            + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent. This will NOT
change when I(ignore_timestamps) is set to C(yes).
type: str
invalidity_date_critical:
description:
- Whether the invalidity date extension should be critical.
type: bool
default: no
ignore_timestamps:
description:
- Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
I(invalidity_date) in I(revoked_certificates) will never be ignored.
- Use this in combination with relative timestamps for these values to get idempotency.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) CRL's content as I(crl).
type: bool
default: no
extends_documentation_fragment:
- files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
  - Dates specified should be in UTC. Minutes and seconds are mandatory.
'''
EXAMPLES = r'''
- name: Generate a CRL
community.crypto.x509_crl:
path: /etc/ssl/my-ca.crl
privatekey_path: /etc/ssl/private/my-ca.pem
issuer:
CN: My CA
last_update: "+0s"
next_update: "+7d"
revoked_certificates:
- serial_number: 1234
revocation_date: 20190331202428Z
issuer:
CN: My CA
- serial_number: 2345
revocation_date: 20191013152910Z
reason: affiliation_changed
invalidity_date: 20191001000000Z
- path: /etc/ssl/crt/revoked-cert.pem
revocation_date: 20191010010203Z
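# An additional, illustrative example (not from the original module docs):
# combining mode=update with relative timestamps and ignore_timestamps keeps an
# existing CRL idempotent while adding new entries. The serial number is a
# placeholder.
- name: Add a revoked certificate to an existing CRL
  community.crypto.x509_crl:
    path: /etc/ssl/my-ca.crl
    privatekey_path: /etc/ssl/private/my-ca.pem
    issuer:
      CN: My CA
    next_update: "+7d"
    mode: update
    ignore_timestamps: yes
    revoked_certificates:
      - serial_number: 3456
        revocation_date: "+0s"
        reason: key_compromise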
'''
RETURN = r'''
filename:
description: Path to the generated CRL
returned: changed or success
type: str
sample: /path/to/my-ca.crl
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/my-ca.crl.2019-03-09@11:22~
privatekey:
description: Path to the private CA key
returned: changed or success
type: str
sample: /path/to/my-ca.pem
format:
description:
- Whether the CRL is in PEM format (C(pem)) or in DER format (C(der)).
returned: success
type: str
sample: pem
issuer:
description:
- The CRL's issuer.
- Note that for repeated values, only the last one will be returned.
returned: success
type: dict
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
description: The CRL's issuer as an ordered list of tuples.
returned: success
type: list
elements: list
  sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
last_update:
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
next_update:
description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
digest:
description: The signature algorithm used to sign the CRL.
returned: success
type: str
sample: sha256WithRSAEncryption
revoked_certificates:
description: List of certificates to be revoked.
returned: success
type: list
elements: dict
contains:
serial_number:
description: Serial number of the certificate.
type: int
sample: 1234
revocation_date:
description: The point in time the certificate was revoked as ASN.1 TIME.
type: str
sample: 20190413202428Z
issuer:
description: The certificate's issuer.
type: list
elements: str
sample: '["DNS:ca.example.org"]'
issuer_critical:
description: Whether the certificate issuer extension is critical.
type: bool
sample: no
reason:
description:
- The value for the revocation reason extension.
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
C(remove_from_crl).
type: str
sample: key_compromise
reason_critical:
description: Whether the revocation reason extension is critical.
type: bool
sample: no
invalidity_date:
description: |
The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid as ASN.1 TIME.
type: str
sample: 20190413202428Z
invalidity_date_critical:
description: Whether the invalidity date extension is critical.
type: bool
sample: no
crl:
description:
- The (current or generated) CRL's content.
- Will be the CRL itself if I(format) is C(pem), and Base64 of the
CRL if I(format) is C(der).
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
'''
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_name,
cryptography_name_to_oid,
cryptography_oid_to_name,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_crl import (
REVOCATION_REASON_MAP,
TIMESTAMP_FORMAT,
cryptography_decode_revoked_certificate,
cryptography_dump_revoked,
cryptography_get_signature_algorithm_oid_from_crl,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_pem_format,
)
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import (
CertificateRevocationListBuilder,
RevokedCertificateBuilder,
NameAttribute,
Name,
)
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class CRLError(OpenSSLObjectError):
pass
class CRL(OpenSSLObject):
def __init__(self, module):
super(CRL, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.format = module.params['format']
self.update = module.params['mode'] == 'update'
self.ignore_timestamps = module.params['ignore_timestamps']
self.return_content = module.params['return_content']
self.crl_content = None
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.issuer = parse_name_field(module.params['issuer'])
self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
self.last_update = get_relative_time_option(module.params['last_update'], 'last_update')
self.next_update = get_relative_time_option(module.params['next_update'], 'next_update')
self.digest = select_message_digest(module.params['digest'])
if self.digest is None:
raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
self.revoked_certificates = []
for i, rc in enumerate(module.params['revoked_certificates']):
result = {
'serial_number': None,
'revocation_date': None,
'issuer': None,
'issuer_critical': False,
'reason': None,
'reason_critical': False,
'invalidity_date': None,
'invalidity_date_critical': False,
}
path_prefix = 'revoked_certificates[{0}].'.format(i)
if rc['path'] is not None or rc['content'] is not None:
# Load certificate from file or content
try:
if rc['content'] is not None:
rc['content'] = rc['content'].encode('utf-8')
cert = load_certificate(rc['path'], content=rc['content'], backend='cryptography')
result['serial_number'] = cryptography_serial_number_of_cert(cert)
except OpenSSLObjectError as e:
if rc['content'] is not None:
module.fail_json(
msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
)
else:
module.fail_json(
msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
)
else:
# Specify serial_number (and potentially issuer) directly
result['serial_number'] = rc['serial_number']
# All other options
if rc['issuer']:
result['issuer'] = [cryptography_get_name(issuer) for issuer in rc['issuer']]
result['issuer_critical'] = rc['issuer_critical']
result['revocation_date'] = get_relative_time_option(
rc['revocation_date'],
path_prefix + 'revocation_date'
)
if rc['reason']:
result['reason'] = REVOCATION_REASON_MAP[rc['reason']]
result['reason_critical'] = rc['reason_critical']
if rc['invalidity_date']:
result['invalidity_date'] = get_relative_time_option(
rc['invalidity_date'],
path_prefix + 'invalidity_date'
)
result['invalidity_date_critical'] = rc['invalidity_date_critical']
self.revoked_certificates.append(result)
self.module = module
self.backup = module.params['backup']
self.backup_file = None
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend='cryptography'
)
except OpenSSLBadPassphraseError as exc:
raise CRLError(exc)
self.crl = None
try:
with open(self.path, 'rb') as f:
data = f.read()
self.actual_format = 'pem' if identify_pem_format(data) else 'der'
if self.actual_format == 'pem':
self.crl = x509.load_pem_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = data
else:
self.crl = x509.load_der_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = base64.b64encode(data)
except Exception as dummy:
self.crl_content = None
self.actual_format = self.format
def remove(self):
if self.backup:
self.backup_file = self.module.backup_local(self.path)
super(CRL, self).remove(self.module)
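    # _compress_entry flattens a revoked-certificate dict into a hashable tuple
    # so entries can be compared (and de-duplicated via a set in update mode);
    # with ignore_timestamps set, revocation_date is left out of the tuple so
    # differing timestamps do not defeat idempotency.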
def _compress_entry(self, entry):
if self.ignore_timestamps:
# Throw out revocation_date
return (
entry['serial_number'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
else:
return (
entry['serial_number'],
entry['revocation_date'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
def check(self, perms_required=True, ignore_conversion=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CRL, self).check(self.module, perms_required)
if not state_and_perms:
return False
if self.crl is None:
return False
if self.last_update != self.crl.last_update and not self.ignore_timestamps:
return False
if self.next_update != self.crl.next_update and not self.ignore_timestamps:
return False
if self.digest.name != self.crl.signature_hash_algorithm.name:
return False
want_issuer = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
return False
old_entries = [self._compress_entry(cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
if self.update:
# We don't simply use a set so that duplicate entries are treated correctly
for entry in new_entries:
try:
old_entries.remove(entry)
except ValueError:
return False
else:
if old_entries != new_entries:
return False
if self.format != self.actual_format and not ignore_conversion:
return False
return True
def _generate_crl(self):
backend = default_backend()
crl = CertificateRevocationListBuilder()
try:
crl = crl.issuer_name(Name([
NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1]))
for entry in self.issuer
]))
except ValueError as e:
raise CRLError(e)
crl = crl.last_update(self.last_update)
crl = crl.next_update(self.next_update)
if self.update and self.crl:
new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
for entry in self.crl:
decoded_entry = self._compress_entry(cryptography_decode_revoked_certificate(entry))
if decoded_entry not in new_entries:
crl = crl.add_revoked_certificate(entry)
for entry in self.revoked_certificates:
revoked_cert = RevokedCertificateBuilder()
revoked_cert = revoked_cert.serial_number(entry['serial_number'])
revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
if entry['issuer'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CertificateIssuer([
cryptography_get_name(name) for name in entry['issuer']
]),
entry['issuer_critical']
)
if entry['reason'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CRLReason(entry['reason']),
entry['reason_critical']
)
if entry['invalidity_date'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.InvalidityDate(entry['invalidity_date']),
entry['invalidity_date_critical']
)
crl = crl.add_revoked_certificate(revoked_cert.build(backend))
self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
if self.format == 'pem':
return self.crl.public_bytes(Encoding.PEM)
else:
return self.crl.public_bytes(Encoding.DER)
def generate(self):
result = None
if not self.check(perms_required=False, ignore_conversion=True) or self.force:
result = self._generate_crl()
elif not self.check(perms_required=False, ignore_conversion=False) and self.crl:
if self.format == 'pem':
result = self.crl.public_bytes(Encoding.PEM)
else:
result = self.crl.public_bytes(Encoding.DER)
if result is not None:
if self.return_content:
if self.format == 'pem':
self.crl_content = result
else:
self.crl_content = base64.b64encode(result)
if self.backup:
self.backup_file = self.module.backup_local(self.path)
write_file(self.module, result)
self.changed = True
file_args = self.module.load_file_common_arguments(self.module.params)
if self.module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'format': self.format,
'last_update': None,
'next_update': None,
'digest': None,
'issuer_ordered': None,
'issuer': None,
'revoked_certificates': [],
}
if self.backup_file:
result['backup_file'] = self.backup_file
if check_mode:
result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
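            # In check mode the CRL has not been (re)signed yet, so report the
            # requested digest instead of reading the algorithm back from a
            # signed CRL object.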
# result['digest'] = cryptography_oid_to_name(self.crl.signature_algorithm_oid)
result['digest'] = self.module.params['digest']
result['issuer_ordered'] = self.issuer
result['issuer'] = {}
for k, v in self.issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for entry in self.revoked_certificates:
result['revoked_certificates'].append(cryptography_dump_revoked(entry))
elif self.crl:
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
result['digest'] = cryptography_oid_to_name(cryptography_get_signature_algorithm_oid_from_crl(self.crl))
issuer = []
for attribute in self.crl.issuer:
issuer.append([cryptography_oid_to_name(attribute.oid), attribute.value])
result['issuer_ordered'] = issuer
result['issuer'] = {}
for k, v in issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for cert in self.crl:
entry = cryptography_decode_revoked_certificate(cert)
result['revoked_certificates'].append(cryptography_dump_revoked(entry))
if self.return_content:
result['crl'] = self.crl_content
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
mode=dict(type='str', default='generate', choices=['generate', 'update']),
force=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
path=dict(type='path', required=True),
format=dict(type='str', default='pem', choices=['pem', 'der']),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str', no_log=True),
privatekey_passphrase=dict(type='str', no_log=True),
issuer=dict(type='dict'),
last_update=dict(type='str', default='+0s'),
next_update=dict(type='str'),
digest=dict(type='str', default='sha256'),
ignore_timestamps=dict(type='bool', default=False),
return_content=dict(type='bool', default=False),
revoked_certificates=dict(
type='list',
elements='dict',
options=dict(
path=dict(type='path'),
content=dict(type='str'),
serial_number=dict(type='int'),
revocation_date=dict(type='str', default='+0s'),
issuer=dict(type='list', elements='str'),
issuer_critical=dict(type='bool', default=False),
reason=dict(
type='str',
choices=[
'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
'superseded', 'cessation_of_operation', 'certificate_hold',
'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
]
),
reason_critical=dict(type='bool', default=False),
invalidity_date=dict(type='str'),
invalidity_date_critical=dict(type='bool', default=False),
),
required_one_of=[['path', 'content', 'serial_number']],
mutually_exclusive=[['path', 'content', 'serial_number']],
),
),
required_if=[
('state', 'present', ['privatekey_path', 'privatekey_content'], True),
('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
add_file_common_args=True,
)
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
try:
crl = CRL(module)
if module.params['state'] == 'present':
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = module.params['force'] or not crl.check() or not crl.check(ignore_conversion=False)
module.exit_json(**result)
crl.generate()
else:
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
crl.remove()
result = crl.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_7 |
crossvul-python_data_bad_4295_7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_crl
version_added: '1.0.0'
short_description: Generate Certificate Revocation Lists (CRLs)
description:
- This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
- Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
or as a path to a certificate file in PEM format.
requirements:
- cryptography >= 1.2
author:
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the CRL file should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
mode:
description:
- Defines how to process entries of existing CRLs.
- If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
as specified in I(revoked_certificates).
- If set to C(update), makes sure that the CRL contains the revoked certificates from
I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
already exists, all entries from the existing CRL will also be included in the new CRL.
When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
type: str
default: generate
choices: [ generate, update ]
force:
description:
- Should the CRL be forced to be regenerated.
type: bool
default: no
backup:
description:
- Create a backup file including a timestamp so you can get the original
CRL back if you overwrote it with a new one by accident.
type: bool
default: no
path:
description:
- Remote absolute path where the generated CRL file should be created or is already located.
type: path
required: yes
format:
description:
- Whether the CRL file should be in PEM or DER format.
- If an existing CRL file does match everything but I(format), it will be converted to the correct format
instead of regenerated.
type: str
choices: [pem, der]
default: pem
privatekey_path:
description:
- Path to the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path).
- This is required if the private key is password protected.
type: str
issuer:
description:
- Key/value pairs that will be present in the issuer name field of the CRL.
- If you need to specify more than one value with the same key, use a list as value.
- Required if I(state) is C(present).
type: dict
last_update:
description:
- The point in time from which this CRL can be trusted.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
        + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
next_update:
description:
- "The absolute latest point in time by which this I(issuer) is expected to have issued
another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
        + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
- Required if I(state) is C(present).
type: str
digest:
description:
- Digest algorithm to be used when signing the CRL.
type: str
default: sha256
revoked_certificates:
description:
- List of certificates to be revoked.
- Required if I(state) is C(present).
type: list
elements: dict
suboptions:
path:
description:
- Path to a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(content) and I(serial_number). One of these three options
must be specified.
type: path
content:
description:
- Content of a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(path) and I(serial_number). One of these three options
must be specified.
type: str
serial_number:
description:
- Serial number of the certificate.
- Mutually exclusive with I(path) and I(content). One of these three options must
be specified.
type: int
revocation_date:
description:
- The point in time the certificate was revoked.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
            + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
issuer:
description:
- The certificate's issuer.
- "Example: C(DNS:ca.example.org)"
type: list
elements: str
issuer_critical:
description:
- Whether the certificate issuer extension should be critical.
type: bool
default: no
reason:
description:
- The value for the revocation reason extension.
type: str
choices:
- unspecified
- key_compromise
- ca_compromise
- affiliation_changed
- superseded
- cessation_of_operation
- certificate_hold
- privilege_withdrawn
- aa_compromise
- remove_from_crl
reason_critical:
description:
- Whether the revocation reason extension should be critical.
type: bool
default: no
invalidity_date:
description:
- The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
            + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent. This will NOT
change when I(ignore_timestamps) is set to C(yes).
type: str
invalidity_date_critical:
description:
- Whether the invalidity date extension should be critical.
type: bool
default: no
ignore_timestamps:
description:
- Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
I(invalidity_date) in I(revoked_certificates) will never be ignored.
- Use this in combination with relative timestamps for these values to get idempotency.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) CRL's content as I(crl).
type: bool
default: no
extends_documentation_fragment:
- files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
  - Dates specified should be in UTC. Minutes and seconds are mandatory.
'''
EXAMPLES = r'''
- name: Generate a CRL
community.crypto.x509_crl:
path: /etc/ssl/my-ca.crl
privatekey_path: /etc/ssl/private/my-ca.pem
issuer:
CN: My CA
last_update: "+0s"
next_update: "+7d"
revoked_certificates:
- serial_number: 1234
revocation_date: 20190331202428Z
issuer:
CN: My CA
- serial_number: 2345
revocation_date: 20191013152910Z
reason: affiliation_changed
invalidity_date: 20191001000000Z
- path: /etc/ssl/crt/revoked-cert.pem
revocation_date: 20191010010203Z
'''
RETURN = r'''
filename:
description: Path to the generated CRL
returned: changed or success
type: str
sample: /path/to/my-ca.crl
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/my-ca.crl.2019-03-09@11:22~
privatekey:
description: Path to the private CA key
returned: changed or success
type: str
sample: /path/to/my-ca.pem
format:
description:
- Whether the CRL is in PEM format (C(pem)) or in DER format (C(der)).
returned: success
type: str
sample: pem
issuer:
description:
- The CRL's issuer.
- Note that for repeated values, only the last one will be returned.
returned: success
type: dict
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
description: The CRL's issuer as an ordered list of tuples.
returned: success
type: list
elements: list
sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
last_update:
description: The point in time from which this CRL can be trusted, as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
next_update:
description: The point in time by which a new CRL will be issued and clients have to check for it, as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
digest:
description: The signature algorithm used to sign the CRL.
returned: success
type: str
sample: sha256WithRSAEncryption
revoked_certificates:
description: List of certificates to be revoked.
returned: success
type: list
elements: dict
contains:
serial_number:
description: Serial number of the certificate.
type: int
sample: 1234
revocation_date:
description: The point in time the certificate was revoked, as ASN.1 TIME.
type: str
sample: 20190413202428Z
issuer:
description: The certificate's issuer.
type: list
elements: str
sample: '["DNS:ca.example.org"]'
issuer_critical:
description: Whether the certificate issuer extension is critical.
type: bool
sample: no
reason:
description:
- The value for the revocation reason extension.
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
C(remove_from_crl).
type: str
sample: key_compromise
reason_critical:
description: Whether the revocation reason extension is critical.
type: bool
sample: no
invalidity_date:
description: |
The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid, as ASN.1 TIME.
type: str
sample: 20190413202428Z
invalidity_date_critical:
description: Whether the invalidity date extension is critical.
type: bool
sample: no
crl:
description:
- The (current or generated) CRL's content.
- Will be the CRL itself if I(format) is C(pem), and Base64 of the
CRL if I(format) is C(der).
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
'''
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_name,
cryptography_name_to_oid,
cryptography_oid_to_name,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_crl import (
REVOCATION_REASON_MAP,
TIMESTAMP_FORMAT,
cryptography_decode_revoked_certificate,
cryptography_dump_revoked,
cryptography_get_signature_algorithm_oid_from_crl,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_pem_format,
)
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import (
CertificateRevocationListBuilder,
RevokedCertificateBuilder,
NameAttribute,
Name,
)
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class CRLError(OpenSSLObjectError):
pass
class CRL(OpenSSLObject):
def __init__(self, module):
super(CRL, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.format = module.params['format']
self.update = module.params['mode'] == 'update'
self.ignore_timestamps = module.params['ignore_timestamps']
self.return_content = module.params['return_content']
self.crl_content = None
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.issuer = parse_name_field(module.params['issuer'])
self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
self.last_update = get_relative_time_option(module.params['last_update'], 'last_update')
self.next_update = get_relative_time_option(module.params['next_update'], 'next_update')
self.digest = select_message_digest(module.params['digest'])
if self.digest is None:
raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
self.revoked_certificates = []
for i, rc in enumerate(module.params['revoked_certificates']):
result = {
'serial_number': None,
'revocation_date': None,
'issuer': None,
'issuer_critical': False,
'reason': None,
'reason_critical': False,
'invalidity_date': None,
'invalidity_date_critical': False,
}
path_prefix = 'revoked_certificates[{0}].'.format(i)
if rc['path'] is not None or rc['content'] is not None:
# Load certificate from file or content
try:
if rc['content'] is not None:
rc['content'] = rc['content'].encode('utf-8')
cert = load_certificate(rc['path'], content=rc['content'], backend='cryptography')
result['serial_number'] = cryptography_serial_number_of_cert(cert)
except OpenSSLObjectError as e:
if rc['content'] is not None:
module.fail_json(
msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
)
else:
module.fail_json(
msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
)
else:
# Specify serial_number (and potentially issuer) directly
result['serial_number'] = rc['serial_number']
# All other options
if rc['issuer']:
result['issuer'] = [cryptography_get_name(issuer) for issuer in rc['issuer']]
result['issuer_critical'] = rc['issuer_critical']
result['revocation_date'] = get_relative_time_option(
rc['revocation_date'],
path_prefix + 'revocation_date'
)
if rc['reason']:
result['reason'] = REVOCATION_REASON_MAP[rc['reason']]
result['reason_critical'] = rc['reason_critical']
if rc['invalidity_date']:
result['invalidity_date'] = get_relative_time_option(
rc['invalidity_date'],
path_prefix + 'invalidity_date'
)
result['invalidity_date_critical'] = rc['invalidity_date_critical']
self.revoked_certificates.append(result)
self.module = module
self.backup = module.params['backup']
self.backup_file = None
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend='cryptography'
)
except OpenSSLBadPassphraseError as exc:
raise CRLError(exc)
self.crl = None
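        # Try to load an existing CRL for comparison; if the file is missing or
        # cannot be parsed, fall back to the requested format and generate anew.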
try:
with open(self.path, 'rb') as f:
data = f.read()
self.actual_format = 'pem' if identify_pem_format(data) else 'der'
if self.actual_format == 'pem':
self.crl = x509.load_pem_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = data
else:
self.crl = x509.load_der_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = base64.b64encode(data)
except Exception as dummy:
self.crl_content = None
self.actual_format = self.format
def remove(self):
if self.backup:
self.backup_file = self.module.backup_local(self.path)
super(CRL, self).remove(self.module)
def _compress_entry(self, entry):
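        # Reduce a revoked-certificate entry to a hashable tuple for comparison.
        # When ignore_timestamps is set, revocation_date is omitted so that
        # relative timestamps do not make every run look changed; invalidity_date
        # is always compared, matching the documented behavior.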
if self.ignore_timestamps:
# Throw out revocation_date
return (
entry['serial_number'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
else:
return (
entry['serial_number'],
entry['revocation_date'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
def check(self, perms_required=True, ignore_conversion=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CRL, self).check(self.module, perms_required)
if not state_and_perms:
return False
if self.crl is None:
return False
if self.last_update != self.crl.last_update and not self.ignore_timestamps:
return False
if self.next_update != self.crl.next_update and not self.ignore_timestamps:
return False
if self.digest.name != self.crl.signature_hash_algorithm.name:
return False
want_issuer = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
return False
old_entries = [self._compress_entry(cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
if self.update:
# We don't simply use a set so that duplicate entries are treated correctly
for entry in new_entries:
try:
old_entries.remove(entry)
except ValueError:
return False
else:
if old_entries != new_entries:
return False
if self.format != self.actual_format and not ignore_conversion:
return False
return True
def _generate_crl(self):
backend = default_backend()
crl = CertificateRevocationListBuilder()
try:
crl = crl.issuer_name(Name([
NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1]))
for entry in self.issuer
]))
except ValueError as e:
raise CRLError(e)
crl = crl.last_update(self.last_update)
crl = crl.next_update(self.next_update)
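        # In update mode, carry over existing revoked certificates that are not
        # superseded by one of the newly specified entries.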
if self.update and self.crl:
new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
for entry in self.crl:
decoded_entry = self._compress_entry(cryptography_decode_revoked_certificate(entry))
if decoded_entry not in new_entries:
crl = crl.add_revoked_certificate(entry)
for entry in self.revoked_certificates:
revoked_cert = RevokedCertificateBuilder()
revoked_cert = revoked_cert.serial_number(entry['serial_number'])
revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
if entry['issuer'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CertificateIssuer([
cryptography_get_name(name) for name in entry['issuer']
]),
entry['issuer_critical']
)
if entry['reason'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CRLReason(entry['reason']),
entry['reason_critical']
)
if entry['invalidity_date'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.InvalidityDate(entry['invalidity_date']),
entry['invalidity_date_critical']
)
crl = crl.add_revoked_certificate(revoked_cert.build(backend))
self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
if self.format == 'pem':
return self.crl.public_bytes(Encoding.PEM)
else:
return self.crl.public_bytes(Encoding.DER)
def generate(self):
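        # Regenerate the CRL if the on-disk state does not match (or force is
        # set); otherwise, if only the encoding differs, re-serialize the
        # already loaded CRL in the requested format.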
result = None
if not self.check(perms_required=False, ignore_conversion=True) or self.force:
result = self._generate_crl()
elif not self.check(perms_required=False, ignore_conversion=False) and self.crl:
if self.format == 'pem':
result = self.crl.public_bytes(Encoding.PEM)
else:
result = self.crl.public_bytes(Encoding.DER)
if result is not None:
if self.return_content:
if self.format == 'pem':
self.crl_content = result
else:
self.crl_content = base64.b64encode(result)
if self.backup:
self.backup_file = self.module.backup_local(self.path)
write_file(self.module, result)
self.changed = True
file_args = self.module.load_file_common_arguments(self.module.params)
if self.module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'format': self.format,
'last_update': None,
'next_update': None,
'digest': None,
'issuer_ordered': None,
'issuer': None,
'revoked_certificates': [],
}
if self.backup_file:
result['backup_file'] = self.backup_file
if check_mode:
result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
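            # In check mode the CRL may not have been generated yet, so report
            # the requested digest instead of reading it from a generated CRL: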
# result['digest'] = cryptography_oid_to_name(self.crl.signature_algorithm_oid)
result['digest'] = self.module.params['digest']
result['issuer_ordered'] = self.issuer
result['issuer'] = {}
for k, v in self.issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for entry in self.revoked_certificates:
result['revoked_certificates'].append(cryptography_dump_revoked(entry))
elif self.crl:
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
result['digest'] = cryptography_oid_to_name(cryptography_get_signature_algorithm_oid_from_crl(self.crl))
issuer = []
for attribute in self.crl.issuer:
issuer.append([cryptography_oid_to_name(attribute.oid), attribute.value])
result['issuer_ordered'] = issuer
result['issuer'] = {}
for k, v in issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for cert in self.crl:
entry = cryptography_decode_revoked_certificate(cert)
result['revoked_certificates'].append(cryptography_dump_revoked(entry))
if self.return_content:
result['crl'] = self.crl_content
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
mode=dict(type='str', default='generate', choices=['generate', 'update']),
force=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
path=dict(type='path', required=True),
format=dict(type='str', default='pem', choices=['pem', 'der']),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
privatekey_passphrase=dict(type='str', no_log=True),
issuer=dict(type='dict'),
last_update=dict(type='str', default='+0s'),
next_update=dict(type='str'),
digest=dict(type='str', default='sha256'),
ignore_timestamps=dict(type='bool', default=False),
return_content=dict(type='bool', default=False),
revoked_certificates=dict(
type='list',
elements='dict',
options=dict(
path=dict(type='path'),
content=dict(type='str'),
serial_number=dict(type='int'),
revocation_date=dict(type='str', default='+0s'),
issuer=dict(type='list', elements='str'),
issuer_critical=dict(type='bool', default=False),
reason=dict(
type='str',
choices=[
'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
'superseded', 'cessation_of_operation', 'certificate_hold',
'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
]
),
reason_critical=dict(type='bool', default=False),
invalidity_date=dict(type='str'),
invalidity_date_critical=dict(type='bool', default=False),
),
required_one_of=[['path', 'content', 'serial_number']],
mutually_exclusive=[['path', 'content', 'serial_number']],
),
),
required_if=[
('state', 'present', ['privatekey_path', 'privatekey_content'], True),
('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
add_file_common_args=True,
)
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
try:
crl = CRL(module)
if module.params['state'] == 'present':
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = module.params['force'] or not crl.check() or not crl.check(ignore_conversion=False)
module.exit_json(**result)
crl.generate()
else:
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
crl.remove()
result = crl.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/bad_4295_7 |
crossvul-python_data_good_4295_6 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_certificate
short_description: Generate and/or check OpenSSL certificates
description:
- This module allows one to (re)generate OpenSSL certificates.
- It implements a notion of provider (i.e. C(selfsigned), C(ownca), C(acme), C(assertonly), C(entrust))
for your certificate.
- The C(assertonly) provider is intended for use cases where one is only interested in
checking properties of a supplied certificate. Please note that this provider has been
deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0. See the examples on how
to emulate C(assertonly) usage with M(community.crypto.x509_certificate_info),
M(community.crypto.openssl_csr_info), M(community.crypto.openssl_privatekey_info) and
M(ansible.builtin.assert). This also allows more flexible checks than
the ones offered by the C(assertonly) provider.
- The C(ownca) provider is intended for generating OpenSSL certificate signed with your own
CA (Certificate Authority) certificate (self-signed certificate).
- Many properties that can be specified in this module are for validation of an
existing or newly generated certificate. The proper place to specify them, if you
want to receive a certificate with these properties is a CSR (Certificate Signing Request).
- "Please note that the module regenerates existing certificate if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing certificate, consider using the I(backup) option."
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL.
- If both the cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with C(select_crypto_backend)).
Please note that the PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
- Note that this module was called C(openssl_certificate) when included directly in Ansible up to version 2.9.
When moved to the collection C(community.crypto), it was renamed to
M(community.crypto.x509_certificate). From Ansible 2.10 on, it can still be used by the
old short name (or by C(ansible.builtin.openssl_certificate)), which redirects to
C(community.crypto.x509_certificate). When using FQCNs or when using the
L(collections,https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook)
keyword, the new name M(community.crypto.x509_certificate) should be used to avoid
a deprecation warning.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.6 (if using C(selfsigned) or C(assertonly) provider)
- acme-tiny >= 4.0.0 (if using the C(acme) provider)
author:
- Yanis Guenane (@Spredzy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
state:
description:
- Whether the certificate should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
path:
description:
- Remote absolute path where the generated certificate file should be created or is already located.
type: path
required: true
provider:
description:
- Name of the provider to use to generate/retrieve the OpenSSL certificate.
- The C(assertonly) provider will not generate files and fail if the certificate file is missing.
- The C(assertonly) provider has been deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
Please see the examples on how to emulate it with
M(community.crypto.x509_certificate_info), M(community.crypto.openssl_csr_info),
M(community.crypto.openssl_privatekey_info) and M(ansible.builtin.assert).
- "The C(entrust) provider was added for Ansible 2.9 and requires credentials for the
L(Entrust Certificate Services,https://www.entrustdatacard.com/products/categories/ssl-certificates) (ECS) API."
- Required if I(state) is C(present).
type: str
choices: [ acme, assertonly, entrust, ownca, selfsigned ]
force:
description:
- Generate the certificate, even if it already exists.
type: bool
default: no
csr_path:
description:
- Path to the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_content).
type: path
csr_content:
description:
- Content of the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_path).
type: str
version_added: '1.0.0'
privatekey_path:
description:
- Path to the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_content).
type: path
privatekey_content:
description:
- Content of the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_path).
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the private key given with I(privatekey_path) or I(privatekey_content).
- This is required if the private key is password protected.
type: str
selfsigned_version:
description:
- Version of the C(selfsigned) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(selfsigned) provider.
type: int
default: 3
selfsigned_digest:
description:
- Digest algorithm to be used when self-signing the certificate.
- This is only used by the C(selfsigned) provider.
type: str
default: sha256
selfsigned_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(selfsigned) provider.
type: str
default: +0s
aliases: [ selfsigned_notBefore ]
selfsigned_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(selfsigned) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
aliases: [ selfsigned_notAfter ]
selfsigned_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(selfsigned) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_path:
description:
- Remote absolute path of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_content).
type: path
ownca_content:
description:
- Content of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_path).
type: str
version_added: '1.0.0'
ownca_privatekey_path:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_content).
type: path
ownca_privatekey_content:
description:
- Content of the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_path).
type: str
version_added: '1.0.0'
ownca_privatekey_passphrase:
description:
- The passphrase for the CA private key given with I(ownca_privatekey_path) or I(ownca_privatekey_content).
- This is only used by the C(ownca) provider.
type: str
ownca_digest:
description:
- The digest algorithm to be used for the C(ownca) certificate.
- This is only used by the C(ownca) provider.
type: str
default: sha256
ownca_version:
description:
- The version of the C(ownca) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(ownca) provider.
type: int
default: 3
ownca_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(ownca) provider.
type: str
default: +0s
ownca_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(ownca) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
ownca_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_create_authority_key_identifier:
description:
- Create an Authority Key Identifier from the CA's certificate. If the CSR provided
an authority key identifier, it is ignored.
- The Authority Key Identifier is generated from the CA certificate's Subject Key Identifier,
if available. If it is not available, the CA certificate's public key will be used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: yes
acme_accountkey_path:
description:
- The path to the accountkey for the C(acme) provider.
- This is only used by the C(acme) provider.
type: path
acme_challenge_path:
description:
- The path to the ACME challenge directory that is served on U(http://<HOST>:80/.well-known/acme-challenge/).
- This is only used by the C(acme) provider.
type: path
acme_chain:
description:
- Include the intermediate certificate in the generated certificate.
- This is only used by the C(acme) provider.
- Note that this is only available for older versions of C(acme-tiny).
New versions include the chain automatically, and setting I(acme_chain) to C(yes) results in an error.
type: bool
default: no
acme_directory:
description:
- "The ACME directory to use. You can use any directory that supports the ACME protocol, such as Buypass or Let's Encrypt."
- "Let's Encrypt recommends using their staging server while developing jobs. U(https://letsencrypt.org/docs/staging-environment/)."
type: str
default: https://acme-v02.api.letsencrypt.org/directory
version_added: '1.0.0'
signature_algorithms:
description:
- A list of algorithms that you would accept the certificate to be signed with
(e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']).
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
issuer:
description:
- The key/value pairs that must be present in the issuer name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
issuer_strict:
description:
- If set to C(yes), the I(issuer) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
subject:
description:
- The key/value pairs that must be present in the subject name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
subject_strict:
description:
- If set to C(yes), the I(subject) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
has_expired:
description:
- Checks if the certificate is expired/not expired at the time the module is executed.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
version:
description:
- The version of the certificate.
- Nowadays it should almost always be 3.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: int
valid_at:
description:
- The certificate must be valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
invalid_at:
description:
- The certificate must be invalid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
not_before:
description:
- The certificate must start to become valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notBefore ]
not_after:
description:
- The certificate must expire at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notAfter ]
valid_in:
description:
- The certificate must still be valid at this relative time offset from now.
- Valid format is C([+-]timespec | number_of_seconds) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using this parameter, this module is NOT idempotent.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
key_usage:
description:
- The I(key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ keyUsage ]
key_usage_strict:
description:
- If set to C(yes), the I(key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ keyUsage_strict ]
extended_key_usage:
description:
- The I(extended_key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ extendedKeyUsage ]
extended_key_usage_strict:
description:
- If set to C(yes), the I(extended_key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ extendedKeyUsage_strict ]
subject_alt_name:
description:
- The I(subject_alt_name) extension field must contain these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_strict:
description:
- If set to C(yes), the I(subject_alt_name) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ subjectAltName_strict ]
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
certificate back if you overwrote it with a new one by accident.
- This is not used by the C(assertonly) provider.
type: bool
default: no
entrust_cert_type:
description:
- Specify the type of certificate requested.
- This is only used by the C(entrust) provider.
type: str
default: STANDARD_SSL
choices: [ 'STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT' ]
entrust_requester_email:
description:
- The email of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_name:
description:
- The name of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_phone:
description:
- The phone number of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_user:
description:
- The username for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_key:
description:
- The key (password) for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_client_cert_path:
description:
- The path to the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_api_client_cert_key_path:
description:
- The path to the private key of the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as an absolute timestamp.
- A valid absolute time format is C(ASN.1 TIME) such as C(2019-06-18).
- A valid relative time format is C([+-]timespec) where timespec can be an integer + C([w | d | h | m | s]), such as C(+365d) or C(+32w1d2h).
- Time will always be interpreted as UTC.
- Note that only the date (day, month, year) is supported for specifying the expiry date of the issued certificate.
- The full date-time is adjusted to EST (GMT -5:00) before issuance, which may result in a certificate with an expiration date one day
earlier than expected if a relative time is used.
- The minimum certificate lifetime is 90 days, and maximum is three years.
- If this value is not specified, the certificate will stop being valid 365 days from the date of issue.
- This is only used by the C(entrust) provider.
type: str
default: +365d
entrust_api_specification_path:
description:
- The path to the specification file defining the Entrust Certificate Services (ECS) API configuration.
- You can use this to keep a local copy of the specification to avoid downloading it every time the module is used.
- This is only used by the C(entrust) provider.
type: path
default: https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml
return_content:
description:
- If set to C(yes), will return the (current or generated) certificate's content as I(certificate).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment: files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Dates specified should be UTC. Minutes and seconds are mandatory.
- For security reasons, when you use the C(ownca) provider, you should NOT run
M(community.crypto.x509_certificate) on a target machine, but on a dedicated CA machine. It
is recommended not to store the CA private key on the target machine. Once signed, the
certificate can be moved to the target machine.
seealso:
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate a Self Signed OpenSSL certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
privatekey_path: /etc/ssl/private/ansible.com.pem
csr_path: /etc/ssl/csr/ansible.com.csr
provider: selfsigned
- name: Generate an OpenSSL certificate signed with your own CA certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
ownca_path: /etc/ssl/crt/ansible_CA.crt
ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
provider: ownca
- name: Generate a Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
- name: Force (re-)generate a new Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
force: yes
- name: Generate an Entrust certificate via the Entrust Certificate Services (ECS) API
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: entrust
entrust_requester_name: Jo Doe
entrust_requester_email: jdoe@ansible.com
entrust_requester_phone: 555-555-5555
entrust_cert_type: STANDARD_SSL
entrust_api_user: apiusername
entrust_api_key: a^lv*32!cd9LnT
entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt
entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-key.crt
entrust_api_specification_path: /etc/ssl/entrust/api-docs/cms-api-2.1.0.yaml
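# A sketch (paths are placeholders) of the ownca provider together with the
# Subject Key Identifier / Authority Key Identifier options documented above;
# this assumes the cryptography backend, which these options require:
- name: Generate a certificate with your own CA and explicit SKI/AKI handling
  community.crypto.x509_certificate:
    path: /etc/ssl/crt/ansible.com.crt
    csr_path: /etc/ssl/csr/ansible.com.csr
    ownca_path: /etc/ssl/crt/ansible_CA.crt
    ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
    ownca_create_subject_key_identifier: always_create
    ownca_create_authority_key_identifier: yes
    provider: ownca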
# The following example shows one assertonly usage using all existing options for
# assertonly, and shows how to emulate the behavior with the x509_certificate_info,
# openssl_csr_info, openssl_privatekey_info and assert modules:
- community.crypto.x509_certificate:
provider: assertonly
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
privatekey_path: /etc/ssl/csr/ansible.com.key
signature_algorithms:
- sha256WithRSAEncryption
- sha512WithRSAEncryption
subject:
commonName: ansible.com
subject_strict: yes
issuer:
commonName: ansible.com
issuer_strict: yes
has_expired: no
version: 3
key_usage:
- Data Encipherment
key_usage_strict: yes
extended_key_usage:
- DVCS
extended_key_usage_strict: yes
subject_alt_name:
- dns:ansible.com
subject_alt_name_strict: yes
not_before: 20190331202428Z
not_after: 20190413202428Z
valid_at: "+1d10h"
invalid_at: 20200331202428Z
valid_in: 10 # in ten seconds
- community.crypto.x509_certificate_info:
path: /etc/ssl/crt/ansible.com.crt
# for valid_at, invalid_at and valid_in
valid_at:
one_day_ten_hours: "+1d10h"
fixed_timestamp: 20200331202428Z
ten_seconds: "+10"
register: result
- community.crypto.openssl_csr_info:
# Verifies that the CSR signature is valid; module will fail if not
path: /etc/ssl/csr/ansible.com.csr
register: result_csr
- community.crypto.openssl_privatekey_info:
path: /etc/ssl/csr/ansible.com.key
register: result_privatekey
- assert:
that:
# When private key is specified for assertonly, this will be checked:
- result.public_key == result_privatekey.public_key
# When CSR is specified for assertonly, this will be checked:
- result.public_key == result_csr.public_key
- result.subject_ordered == result_csr.subject_ordered
- result.extensions_by_oid == result_csr.extensions_by_oid
# signature_algorithms check
- "result.signature_algorithm == 'sha256WithRSAEncryption' or result.signature_algorithm == 'sha512WithRSAEncryption'"
# subject and subject_strict
- "result.subject.commonName == 'ansible.com'"
- "result.subject | length == 1" # the number must be the number of entries you check for
# issuer and issuer_strict
- "result.issuer.commonName == 'ansible.com'"
- "result.issuer | length == 1" # the number must be the number of entries you check for
# has_expired
- not result.expired
# version
- result.version == 3
# key_usage and key_usage_strict
- "'Data Encipherment' in result.key_usage"
- "result.key_usage | length == 1" # the number must be the number of entries you check for
# extended_key_usage and extended_key_usage_strict
- "'DVCS' in result.extended_key_usage"
- "result.extended_key_usage | length == 1" # the number must be the number of entries you check for
# subject_alt_name and subject_alt_name_strict
- "'dns:ansible.com' in result.subject_alt_name"
- "result.subject_alt_name | length == 1" # the number must be the number of entries you check for
# not_before and not_after
- "result.not_before == '20190331202428Z'"
- "result.not_after == '20190413202428Z'"
# valid_at, invalid_at and valid_in
- "result.valid_at.one_day_ten_hours" # for valid_at
- "not result.valid_at.fixed_timestamp" # for invalid_at
- "result.valid_at.ten_seconds" # for valid_in
# Examples for some checks one could use the assertonly provider for:
# (Please note that assertonly has been deprecated!)
# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow:
- name: Check if a certificate is currently still valid, ignoring failures
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
ignore_errors: yes
register: validity_check
- name: Run custom task(s) to get a new, valid certificate in case the initial check failed
command: superspecialSSL recreate /etc/ssl/crt/example.com.crt
when: validity_check.failed
- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
when: validity_check.failed
# Some other checks that assertonly could be used for:
- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
issuer:
O: Let's Encrypt
has_expired: no
- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA)
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
signature_algorithms:
- sha224WithRSAEncryption
- sha256WithRSAEncryption
- sha384WithRSAEncryption
- sha512WithRSAEncryption
- sha224WithECDSAEncryption
- sha256WithECDSAEncryption
- sha384WithECDSAEncryption
- sha512WithECDSAEncryption
- name: Ensure that the existing certificate belongs to the specified private key
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
privatekey_path: /etc/ssl/private/example.com.pem
provider: assertonly
- name: Ensure that the existing certificate is still valid at the winter solstice 2017
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_at: 20171221162800Z
- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_in: 1209600
- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
key_usage:
- digitalSignature
- keyEncipherment
key_usage_strict: true
- name: Ensure that the existing certificate can be used for client authentication
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- name: Ensure that the existing certificate can only be used for client authentication and time stamping
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- 1.3.6.1.5.5.7.3.8
extended_key_usage_strict: true
- name: Ensure that the existing certificate has a certain domain in its subjectAltName
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
subject_alt_name:
- www.example.com
- test.example.com
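# A sketch (the /tmp paths and the assumption that the CA key lives on the
# controller are illustrative) of the workflow recommended in the notes:
# sign on a dedicated machine instead of the target, then copy the signed
# certificate over:
- name: Sign the certificate where the CA private key lives
  community.crypto.x509_certificate:
    path: /tmp/ansible.com.crt
    csr_path: /tmp/ansible.com.csr
    ownca_path: /etc/ssl/crt/ansible_CA.crt
    ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
    provider: ownca
  delegate_to: localhost
- name: Copy the signed certificate to the target machine
  ansible.builtin.copy:
    src: /tmp/ansible.com.crt
    dest: /etc/ssl/crt/ansible.com.crt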
'''
RETURN = r'''
filename:
description: Path to the generated certificate.
returned: changed or success
type: str
sample: /etc/ssl/crt/www.ansible.com.crt
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.crt.2019-03-09@11:22~
certificate:
description: The (current or generated) certificate's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import datetime
import time
import os
import tempfile
import traceback
from distutils.version import LooseVersion
from random import randrange
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.compat import ipaddress as compat_ipaddress
from ansible_collections.community.crypto.plugins.module_utils.ecs.api import ECSClient, RestOperationException, SessionConfigurationException
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
load_certificate_request,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_compare_public_keys,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
)
MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
MINIMAL_PYOPENSSL_VERSION = '0.15'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import NameAttribute, Name
from cryptography.x509.oid import NameOID
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class CertificateError(OpenSSLObjectError):
pass
class Certificate(OpenSSLObject):
def __init__(self, module, backend):
super(Certificate, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.provider = module.params['provider']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.csr_path = module.params['csr_path']
self.csr_content = module.params['csr_content']
if self.csr_content is not None:
self.csr_content = self.csr_content.encode('utf-8')
self.cert = None
self.privatekey = None
self.csr = None
self.backend = backend
self.module = module
self.return_content = module.params['return_content']
# The following are default values which make sure check() works as
# before if providers do not explicitly change these properties.
self.create_subject_key_identifier = 'never_create'
self.create_authority_key_identifier = False
self.backup = module.params['backup']
self.backup_file = None
def _validate_privatekey(self):
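        # Check that the certificate matches the given private key: via an SSL
        # context check for the pyOpenSSL backend, or by comparing public keys
        # for the cryptography backend.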
if self.backend == 'pyopenssl':
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey(self.privatekey)
ctx.use_certificate(self.cert)
try:
ctx.check_privatekey()
return True
except OpenSSL.SSL.Error:
return False
elif self.backend == 'cryptography':
return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
def _validate_csr(self):
if self.backend == 'pyopenssl':
# Verify that CSR is signed by certificate's private key
try:
self.csr.verify(self.cert.get_pubkey())
except OpenSSL.crypto.Error:
return False
# Check subject
if self.csr.get_subject() != self.cert.get_subject():
return False
# Check extensions
csr_extensions = self.csr.get_extensions()
cert_extension_count = self.cert.get_extension_count()
if len(csr_extensions) != cert_extension_count:
return False
for extension_number in range(0, cert_extension_count):
cert_extension = self.cert.get_extension(extension_number)
csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
if cert_extension.get_data() != list(csr_extension)[0].get_data():
return False
return True
elif self.backend == 'cryptography':
# Verify that CSR is signed by certificate's private key
if not self.csr.is_signature_valid:
return False
if not cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key()):
return False
# Check subject
if self.csr.subject != self.cert.subject:
return False
# Check extensions
cert_exts = list(self.cert.extensions)
csr_exts = list(self.csr.extensions)
if self.create_subject_key_identifier != 'never_create':
# Filter out SubjectKeyIdentifier extension before comparison
cert_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), cert_exts))
csr_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), csr_exts))
if self.create_authority_key_identifier:
# Filter out AuthorityKeyIdentifier extension before comparison
cert_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), cert_exts))
csr_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), csr_exts))
if len(cert_exts) != len(csr_exts):
return False
for cert_ext in cert_exts:
try:
csr_ext = self.csr.extensions.get_extension_for_oid(cert_ext.oid)
if cert_ext != csr_ext:
return False
except cryptography.x509.ExtensionNotFound as dummy:
return False
return True
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(Certificate, self).remove(module)
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(Certificate, self).check(module, perms_required)
if not state_and_perms:
return False
try:
self.cert = load_certificate(self.path, backend=self.backend)
except Exception as dummy:
return False
if self.privatekey_path or self.privatekey_content:
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
raise CertificateError(exc)
if not self._validate_privatekey():
return False
if self.csr_path or self.csr_content:
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
if not self._validate_csr():
return False
# Check SubjectKeyIdentifier
if self.backend == 'cryptography' and self.create_subject_key_identifier != 'never_create':
# Get hold of certificate's SKI
try:
ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except cryptography.x509.ExtensionNotFound as dummy:
return False
# Get hold of CSR's SKI for 'create_if_not_provided'
csr_ext = None
if self.create_subject_key_identifier == 'create_if_not_provided':
try:
csr_ext = self.csr.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except cryptography.x509.ExtensionNotFound as dummy:
pass
if csr_ext is None:
# If CSR had no SKI, or we chose to ignore it ('always_create'), compare with created SKI
if ext.value.digest != x509.SubjectKeyIdentifier.from_public_key(self.cert.public_key()).digest:
return False
else:
# If CSR had SKI and we didn't ignore it ('create_if_not_provided'), compare SKIs
if ext.value.digest != csr_ext.value.digest:
return False
return True
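# Illustrative sketch, not part of the module's control flow: the
# SubjectKeyIdentifier comparison in Certificate.check() above relies on the
# fact that a SKI derived from a public key is deterministic, so deriving it
# twice yields the same digest. Assumes the 'cryptography' package; the key
# size and the function name are arbitrary choices for this demo.
def _demo_ski_digest_is_deterministic():
    from cryptography.hazmat.primitives.asymmetric import rsa
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    ski_1 = x509.SubjectKeyIdentifier.from_public_key(key.public_key())
    ski_2 = x509.SubjectKeyIdentifier.from_public_key(key.public_key())
    assert ski_1.digest == ski_2.digest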
class CertificateAbsent(Certificate):
def __init__(self, module):
super(CertificateAbsent, self).__init__(module, 'cryptography') # backend doesn't matter
def generate(self, module):
pass
def dump(self, check_mode=False):
# Use only for absent
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
result['certificate'] = None
return result
class SelfSignedCertificateCryptography(Certificate):
"""Generate the self-signed certificate, using the cryptography backend"""
def __init__(self, module):
super(SelfSignedCertificateCryptography, self).__init__(module, 'cryptography')
self.create_subject_key_identifier = module.params['selfsigned_create_subject_key_identifier']
self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
self.digest = select_message_digest(module.params['selfsigned_digest'])
self.version = module.params['selfsigned_version']
self.serial_number = x509.random_serial_number()
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key file {0} does not exist'.format(self.privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
self._module = module
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=to_native(exc))
if cryptography_key_needs_digest_for_signing(self.privatekey):
if self.digest is None:
raise CertificateError(
'The digest %s is not supported with the cryptography backend' % module.params['selfsigned_digest']
)
else:
self.digest = None
def generate(self, module):
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
try:
cert_builder = x509.CertificateBuilder()
cert_builder = cert_builder.subject_name(self.csr.subject)
cert_builder = cert_builder.issuer_name(self.csr.subject)
cert_builder = cert_builder.serial_number(self.serial_number)
cert_builder = cert_builder.not_valid_before(self.notBefore)
cert_builder = cert_builder.not_valid_after(self.notAfter)
cert_builder = cert_builder.public_key(self.privatekey.public_key())
has_ski = False
for extension in self.csr.extensions:
if isinstance(extension.value, x509.SubjectKeyIdentifier):
if self.create_subject_key_identifier == 'always_create':
continue
has_ski = True
cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
if not has_ski and self.create_subject_key_identifier != 'never_create':
cert_builder = cert_builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
critical=False
)
except ValueError as e:
raise CertificateError(str(e))
try:
certificate = cert_builder.sign(
private_key=self.privatekey, algorithm=self.digest,
backend=default_backend()
)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
self.cert = certificate
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, certificate.public_bytes(Encoding.PEM))
self.changed = True
else:
self.cert = load_certificate(self.path, backend=self.backend)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
'serial_number': cryptography_serial_number_of_cert(self.cert),
})
return result
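# Minimal sketch of the x509.CertificateBuilder flow used by
# SelfSignedCertificateCryptography.generate() above, reduced to a
# self-contained example: fresh key, subject == issuer, fixed 30-day validity.
# All names, the key size and the validity period are illustrative only.
def _demo_minimal_self_signed_pem():
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.x509.oid import NameOID
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'demo.example.com')])
    now = datetime.datetime.utcnow()
    cert = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=30))
        .public_key(key.public_key())
        .sign(private_key=key, algorithm=hashes.SHA256(), backend=default_backend())
    )
    return cert.public_bytes(Encoding.PEM)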
def generate_serial_number():
"""Generate a serial number for a certificate"""
while True:
result = randrange(0, 1 << 160)
if result >= 1000:
return result
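# Illustrative sketch only: generate_serial_number() above draws from the
# non-cryptographic 'random' module, while the cryptography backend uses
# x509.random_serial_number() (backed by os.urandom). A CSPRNG-based variant
# of the same helper could look like this; the function name is hypothetical.
def _demo_csprng_serial_number():
    import random
    sysrand = random.SystemRandom()  # backed by os.urandom
    while True:
        # RFC 5280 limits serial numbers to 20 octets (160 bits)
        result = sysrand.randrange(0, 1 << 160)
        if result >= 1000:
            return result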
class SelfSignedCertificate(Certificate):
"""Generate the self-signed certificate."""
def __init__(self, module):
super(SelfSignedCertificate, self).__init__(module, 'pyopenssl')
if module.params['selfsigned_create_subject_key_identifier'] != 'create_if_not_provided':
module.fail_json(msg='selfsigned_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
self.digest = module.params['selfsigned_digest']
self.version = module.params['selfsigned_version']
self.serial_number = generate_serial_number()
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key file {0} does not exist'.format(self.privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
)
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=str(exc))
def generate(self, module):
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert = crypto.X509()
cert.set_serial_number(self.serial_number)
cert.set_notBefore(to_bytes(self.notBefore))
cert.set_notAfter(to_bytes(self.notAfter))
cert.set_subject(self.csr.get_subject())
cert.set_issuer(self.csr.get_subject())
cert.set_version(self.version - 1)
cert.set_pubkey(self.csr.get_pubkey())
cert.add_extensions(self.csr.get_extensions())
cert.sign(self.privatekey, self.digest)
self.cert = cert
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore,
'notAfter': self.notAfter,
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.get_notBefore(),
'notAfter': self.cert.get_notAfter(),
'serial_number': self.cert.get_serial_number(),
})
return result
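# Small sketch of the off-by-one version encoding used by the pyOpenSSL
# providers: X.509 stores the version as (human-readable version - 1), so a v3
# certificate is encoded as the integer 2. This is why generate() above calls
# cert.set_version(self.version - 1) and the assertonly validator later adds 1
# back. Demo function and argument are hypothetical.
def _demo_x509_version_encoding(human_version=3):
    encoded = human_version - 1   # value handed to cert.set_version()
    decoded = encoded + 1         # value recovered via cert.get_version() + 1
    return encoded, decoded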
class OwnCACertificateCryptography(Certificate):
"""Generate the own CA certificate. Using the cryptography backend"""
def __init__(self, module):
super(OwnCACertificateCryptography, self).__init__(module, 'cryptography')
self.create_subject_key_identifier = module.params['ownca_create_subject_key_identifier']
self.create_authority_key_identifier = module.params['ownca_create_authority_key_identifier']
self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
self.digest = select_message_digest(module.params['ownca_digest'])
self.version = module.params['ownca_version']
self.serial_number = x509.random_serial_number()
self.ca_cert_path = module.params['ownca_path']
self.ca_cert_content = module.params['ownca_content']
if self.ca_cert_content is not None:
self.ca_cert_content = self.ca_cert_content.encode('utf-8')
self.ca_privatekey_path = module.params['ownca_privatekey_path']
self.ca_privatekey_content = module.params['ownca_privatekey_content']
if self.ca_privatekey_content is not None:
self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
self.ca_cert = load_certificate(
path=self.ca_cert_path,
content=self.ca_cert_content,
backend=self.backend
)
try:
self.ca_private_key = load_privatekey(
path=self.ca_privatekey_path,
content=self.ca_privatekey_content,
passphrase=self.ca_privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=str(exc))
if cryptography_key_needs_digest_for_signing(self.ca_private_key):
if self.digest is None:
raise CertificateError(
'The digest %s is not supported with the cryptography backend' % module.params['ownca_digest']
)
else:
self.digest = None
def generate(self, module):
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate %s does not exist' % self.ca_cert_path
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key %s does not exist' % self.ca_privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert_builder = x509.CertificateBuilder()
cert_builder = cert_builder.subject_name(self.csr.subject)
cert_builder = cert_builder.issuer_name(self.ca_cert.subject)
cert_builder = cert_builder.serial_number(self.serial_number)
cert_builder = cert_builder.not_valid_before(self.notBefore)
cert_builder = cert_builder.not_valid_after(self.notAfter)
cert_builder = cert_builder.public_key(self.csr.public_key())
has_ski = False
for extension in self.csr.extensions:
if isinstance(extension.value, x509.SubjectKeyIdentifier):
if self.create_subject_key_identifier == 'always_create':
continue
has_ski = True
if self.create_authority_key_identifier and isinstance(extension.value, x509.AuthorityKeyIdentifier):
continue
cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
if not has_ski and self.create_subject_key_identifier != 'never_create':
cert_builder = cert_builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(self.csr.public_key()),
critical=False
)
if self.create_authority_key_identifier:
try:
ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
cert_builder = cert_builder.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext),
critical=False
)
except cryptography.x509.ExtensionNotFound:
cert_builder = cert_builder.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key()),
critical=False
)
try:
certificate = cert_builder.sign(
private_key=self.ca_private_key, algorithm=self.digest,
backend=default_backend()
)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
self.cert = certificate
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, certificate.public_bytes(Encoding.PEM))
self.changed = True
else:
self.cert = load_certificate(self.path, backend=self.backend)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
if not super(OwnCACertificateCryptography, self).check(module, perms_required):
return False
# Check AuthorityKeyIdentifier
if self.create_authority_key_identifier:
try:
ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
expected_ext = (
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext)
)
except cryptography.x509.ExtensionNotFound:
expected_ext = x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key())
try:
ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
if ext.value != expected_ext:
return False
except cryptography.x509.ExtensionNotFound as dummy:
return False
return True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
'ca_cert': self.ca_cert_path,
'ca_privatekey': self.ca_privatekey_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
'serial_number': cryptography_serial_number_of_cert(self.cert),
})
return result
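# Illustrative helper, not called by the module: how the expected
# AuthorityKeyIdentifier is derived in OwnCACertificateCryptography above. If
# the CA certificate carries a SubjectKeyIdentifier, the AKI is derived from it
# (cryptography >= 2.7 takes the extension value, older versions the extension
# object); otherwise it falls back to the CA public key.
def _demo_expected_authority_key_identifier(ca_cert):
    try:
        ski = ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
        return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(
            ski.value if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else ski)
    except cryptography.x509.ExtensionNotFound:
        return x509.AuthorityKeyIdentifier.from_issuer_public_key(ca_cert.public_key())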
class OwnCACertificate(Certificate):
"""Generate the own CA certificate."""
def __init__(self, module):
super(OwnCACertificate, self).__init__(module, 'pyopenssl')
self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
self.digest = module.params['ownca_digest']
self.version = module.params['ownca_version']
self.serial_number = generate_serial_number()
if module.params['ownca_create_subject_key_identifier'] != 'create_if_not_provided':
module.fail_json(msg='ownca_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
if module.params['ownca_create_authority_key_identifier']:
module.warn('ownca_create_authority_key_identifier is ignored by the pyOpenSSL backend!')
self.ca_cert_path = module.params['ownca_path']
self.ca_cert_content = module.params['ownca_content']
if self.ca_cert_content is not None:
self.ca_cert_content = self.ca_cert_content.encode('utf-8')
self.ca_privatekey_path = module.params['ownca_privatekey_path']
self.ca_privatekey_content = module.params['ownca_privatekey_content']
if self.ca_privatekey_content is not None:
self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
)
self.ca_cert = load_certificate(
path=self.ca_cert_path,
content=self.ca_cert_content,
)
try:
self.ca_privatekey = load_privatekey(
path=self.ca_privatekey_path,
content=self.ca_privatekey_content,
passphrase=self.ca_privatekey_passphrase
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=str(exc))
def generate(self, module):
if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate %s does not exist' % self.ca_cert_path
)
if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key %s does not exist' % self.ca_privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert = crypto.X509()
cert.set_serial_number(self.serial_number)
cert.set_notBefore(to_bytes(self.notBefore))
cert.set_notAfter(to_bytes(self.notAfter))
cert.set_subject(self.csr.get_subject())
cert.set_issuer(self.ca_cert.get_subject())
cert.set_version(self.version - 1)
cert.set_pubkey(self.csr.get_pubkey())
cert.add_extensions(self.csr.get_extensions())
cert.sign(self.ca_privatekey, self.digest)
self.cert = cert
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
'ca_cert': self.ca_cert_path,
'ca_privatekey': self.ca_privatekey_path
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
if check_mode:
result.update({
'notBefore': self.notBefore,
'notAfter': self.notAfter,
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.get_notBefore(),
'notAfter': self.cert.get_notAfter(),
'serial_number': self.cert.get_serial_number(),
})
return result
def compare_sets(subset, superset, equality=False):
if equality:
return set(subset) == set(superset)
else:
return all(x in superset for x in subset)
def compare_dicts(subset, superset, equality=False):
if equality:
return subset == superset
else:
return all(superset.get(x) == v for x, v in subset.items())
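# Quick illustration of the two comparison helpers above: by default they test
# subset (or sub-dict) containment, which backs the non-strict assertonly
# checks; with equality=True they require an exact match (the *_strict
# options). The demo values are arbitrary.
def _demo_compare_helpers():
    assert compare_sets(['a'], ['a', 'b']) is True                       # subset suffices
    assert compare_sets(['a'], ['a', 'b'], equality=True) is False       # strict: exact match
    assert compare_sets(['b', 'a'], ['a', 'b'], equality=True) is True   # order-insensitive
    assert compare_dicts({'x': 1}, {'x': 1, 'y': 2}) is True
    assert compare_dicts({'x': 1}, {'x': 1, 'y': 2}, equality=True) is False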
NO_EXTENSION = 'no extension'
class AssertOnlyCertificateBase(Certificate):
def __init__(self, module, backend):
super(AssertOnlyCertificateBase, self).__init__(module, backend)
self.signature_algorithms = module.params['signature_algorithms']
if module.params['subject']:
self.subject = parse_name_field(module.params['subject'])
else:
self.subject = []
self.subject_strict = module.params['subject_strict']
if module.params['issuer']:
self.issuer = parse_name_field(module.params['issuer'])
else:
self.issuer = []
self.issuer_strict = module.params['issuer_strict']
self.has_expired = module.params['has_expired']
self.version = module.params['version']
self.key_usage = module.params['key_usage']
self.key_usage_strict = module.params['key_usage_strict']
self.extended_key_usage = module.params['extended_key_usage']
self.extended_key_usage_strict = module.params['extended_key_usage_strict']
self.subject_alt_name = module.params['subject_alt_name']
self.subject_alt_name_strict = module.params['subject_alt_name_strict']
self.not_before = module.params['not_before']
self.not_after = module.params['not_after']
self.valid_at = module.params['valid_at']
self.invalid_at = module.params['invalid_at']
self.valid_in = module.params['valid_in']
if self.valid_in and not self.valid_in.startswith("+") and not self.valid_in.startswith("-"):
try:
int(self.valid_in)
except ValueError:
module.fail_json(msg='The supplied value for "valid_in" (%s) is not an integer or a valid timespec' % self.valid_in)
self.valid_in = "+" + self.valid_in + "s"
# Load objects
self.cert = load_certificate(self.path, backend=self.backend)
if self.privatekey_path is not None or self.privatekey_content is not None:
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
raise CertificateError(exc)
if self.csr_path is not None or self.csr_content is not None:
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
@abc.abstractmethod
def _validate_privatekey(self):
pass
@abc.abstractmethod
def _validate_csr_signature(self):
pass
@abc.abstractmethod
def _validate_csr_subject(self):
pass
@abc.abstractmethod
def _validate_csr_extensions(self):
pass
@abc.abstractmethod
def _validate_signature_algorithms(self):
pass
@abc.abstractmethod
def _validate_subject(self):
pass
@abc.abstractmethod
def _validate_issuer(self):
pass
@abc.abstractmethod
def _validate_has_expired(self):
pass
@abc.abstractmethod
def _validate_version(self):
pass
@abc.abstractmethod
def _validate_key_usage(self):
pass
@abc.abstractmethod
def _validate_extended_key_usage(self):
pass
@abc.abstractmethod
def _validate_subject_alt_name(self):
pass
@abc.abstractmethod
def _validate_not_before(self):
pass
@abc.abstractmethod
def _validate_not_after(self):
pass
@abc.abstractmethod
def _validate_valid_at(self):
pass
@abc.abstractmethod
def _validate_invalid_at(self):
pass
@abc.abstractmethod
def _validate_valid_in(self):
pass
def assertonly(self, module):
messages = []
if self.privatekey_path is not None or self.privatekey_content is not None:
if not self._validate_privatekey():
messages.append(
'Certificate %s and private key %s do not match' %
(self.path, self.privatekey_path or '(provided in module options)')
)
if self.csr_path is not None or self.csr_content is not None:
if not self._validate_csr_signature():
messages.append(
'Certificate %s and CSR %s do not match: private key mismatch' %
(self.path, self.csr_path or '(provided in module options)')
)
if not self._validate_csr_subject():
messages.append(
'Certificate %s and CSR %s do not match: subject mismatch' %
(self.path, self.csr_path or '(provided in module options)')
)
if not self._validate_csr_extensions():
messages.append(
'Certificate %s and CSR %s do not match: extensions mismatch' %
(self.path, self.csr_path or '(provided in module options)')
)
if self.signature_algorithms is not None:
wrong_alg = self._validate_signature_algorithms()
if wrong_alg:
messages.append(
'Invalid signature algorithm (got %s, expected one of %s)' %
(wrong_alg, self.signature_algorithms)
)
if self.subject is not None:
failure = self._validate_subject()
if failure:
dummy, cert_subject = failure
messages.append(
'Invalid subject component (got %s, expected all of %s to be present)' %
(cert_subject, self.subject)
)
if self.issuer is not None:
failure = self._validate_issuer()
if failure:
dummy, cert_issuer = failure
messages.append(
'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer)
)
if self.has_expired is not None:
cert_expired = self._validate_has_expired()
if cert_expired != self.has_expired:
messages.append(
'Certificate expiration check failed (certificate expiration is %s, expected %s)' %
(cert_expired, self.has_expired)
)
if self.version is not None:
cert_version = self._validate_version()
if cert_version != self.version:
messages.append(
'Invalid certificate version number (got %s, expected %s)' %
(cert_version, self.version)
)
if self.key_usage is not None:
failure = self._validate_key_usage()
if failure == NO_EXTENSION:
messages.append('Found no keyUsage extension')
elif failure:
dummy, cert_key_usage = failure
messages.append(
'Invalid keyUsage components (got %s, expected all of %s to be present)' %
(cert_key_usage, self.key_usage)
)
if self.extended_key_usage is not None:
failure = self._validate_extended_key_usage()
if failure == NO_EXTENSION:
messages.append('Found no extendedKeyUsage extension')
elif failure:
dummy, ext_cert_key_usage = failure
messages.append(
'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (ext_cert_key_usage, self.extended_key_usage)
)
if self.subject_alt_name is not None:
failure = self._validate_subject_alt_name()
if failure == NO_EXTENSION:
messages.append('Found no subjectAltName extension')
elif failure:
dummy, cert_san = failure
messages.append(
'Invalid subjectAltName component (got %s, expected all of %s to be present)' %
(cert_san, self.subject_alt_name)
)
if self.not_before is not None:
cert_not_valid_before = self._validate_not_before()
if cert_not_valid_before != get_relative_time_option(self.not_before, 'not_before', backend=self.backend):
messages.append(
                    'Invalid not_before component (got %s, expected %s)' %
(cert_not_valid_before, self.not_before)
)
if self.not_after is not None:
cert_not_valid_after = self._validate_not_after()
if cert_not_valid_after != get_relative_time_option(self.not_after, 'not_after', backend=self.backend):
messages.append(
                    'Invalid not_after component (got %s, expected %s)' %
(cert_not_valid_after, self.not_after)
)
if self.valid_at is not None:
not_before, valid_at, not_after = self._validate_valid_at()
if not (not_before <= valid_at <= not_after):
messages.append(
'Certificate is not valid for the specified date (%s) - not_before: %s - not_after: %s' %
(self.valid_at, not_before, not_after)
)
if self.invalid_at is not None:
not_before, invalid_at, not_after = self._validate_invalid_at()
if not_before <= invalid_at <= not_after:
messages.append(
'Certificate is not invalid for the specified date (%s) - not_before: %s - not_after: %s' %
(self.invalid_at, not_before, not_after)
)
if self.valid_in is not None:
not_before, valid_in, not_after = self._validate_valid_in()
if not not_before <= valid_in <= not_after:
messages.append(
'Certificate is not valid in %s from now (that would be %s) - not_before: %s - not_after: %s' %
(self.valid_in, valid_in, not_before, not_after)
)
return messages
def generate(self, module):
"""Don't generate anything - only assert"""
messages = self.assertonly(module)
if messages:
module.fail_json(msg=' | '.join(messages))
def check(self, module, perms_required=False):
"""Ensure the resource is in its desired state."""
messages = self.assertonly(module)
return len(messages) == 0
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
}
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
return result
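# Standalone sketch (hypothetical helper) of the valid_in normalization done in
# AssertOnlyCertificateBase.__init__ above: a bare integer such as "1200" is
# rewritten to the relative timespec "+1200s" before being handed to
# get_relative_time_option(). Values already starting with '+' or '-' pass
# through unchanged; unlike the module (which calls fail_json), this sketch
# raises ValueError for non-integer input.
def _demo_normalize_valid_in(valid_in):
    if valid_in and not valid_in.startswith('+') and not valid_in.startswith('-'):
        int(valid_in)  # raises ValueError for anything that is not an integer
        valid_in = '+' + valid_in + 's'
    return valid_in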
class AssertOnlyCertificateCryptography(AssertOnlyCertificateBase):
"""Validate the supplied cert, using the cryptography backend"""
def __init__(self, module):
super(AssertOnlyCertificateCryptography, self).__init__(module, 'cryptography')
def _validate_privatekey(self):
return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
def _validate_csr_signature(self):
if not self.csr.is_signature_valid:
return False
return cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key())
def _validate_csr_subject(self):
return self.csr.subject == self.cert.subject
def _validate_csr_extensions(self):
cert_exts = self.cert.extensions
csr_exts = self.csr.extensions
if len(cert_exts) != len(csr_exts):
return False
for cert_ext in cert_exts:
try:
csr_ext = csr_exts.get_extension_for_oid(cert_ext.oid)
if cert_ext != csr_ext:
return False
except cryptography.x509.ExtensionNotFound as dummy:
return False
return True
def _validate_signature_algorithms(self):
if self.cert.signature_algorithm_oid._name not in self.signature_algorithms:
return self.cert.signature_algorithm_oid._name
def _validate_subject(self):
expected_subject = Name([NameAttribute(oid=cryptography_name_to_oid(sub[0]), value=to_text(sub[1]))
for sub in self.subject])
cert_subject = self.cert.subject
if not compare_sets(expected_subject, cert_subject, self.subject_strict):
return expected_subject, cert_subject
def _validate_issuer(self):
expected_issuer = Name([NameAttribute(oid=cryptography_name_to_oid(iss[0]), value=to_text(iss[1]))
for iss in self.issuer])
cert_issuer = self.cert.issuer
if not compare_sets(expected_issuer, cert_issuer, self.issuer_strict):
return self.issuer, cert_issuer
def _validate_has_expired(self):
cert_not_after = self.cert.not_valid_after
cert_expired = cert_not_after < datetime.datetime.utcnow()
return cert_expired
def _validate_version(self):
if self.cert.version == x509.Version.v1:
return 1
if self.cert.version == x509.Version.v3:
return 3
return "unknown"
def _validate_key_usage(self):
try:
current_key_usage = self.cert.extensions.get_extension_for_class(x509.KeyUsage).value
test_key_usage = dict(
digital_signature=current_key_usage.digital_signature,
content_commitment=current_key_usage.content_commitment,
key_encipherment=current_key_usage.key_encipherment,
data_encipherment=current_key_usage.data_encipherment,
key_agreement=current_key_usage.key_agreement,
key_cert_sign=current_key_usage.key_cert_sign,
crl_sign=current_key_usage.crl_sign,
encipher_only=False,
decipher_only=False
)
if test_key_usage['key_agreement']:
test_key_usage.update(dict(
encipher_only=current_key_usage.encipher_only,
decipher_only=current_key_usage.decipher_only
))
key_usages = cryptography_parse_key_usage_params(self.key_usage)
if not compare_dicts(key_usages, test_key_usage, self.key_usage_strict):
return self.key_usage, [k for k, v in test_key_usage.items() if v is True]
except cryptography.x509.ExtensionNotFound:
# This is only bad if the user specified a non-empty list
if self.key_usage:
return NO_EXTENSION
def _validate_extended_key_usage(self):
try:
current_ext_keyusage = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
usages = [cryptography_name_to_oid(usage) for usage in self.extended_key_usage]
expected_ext_keyusage = x509.ExtendedKeyUsage(usages)
if not compare_sets(expected_ext_keyusage, current_ext_keyusage, self.extended_key_usage_strict):
                # ObjectIdentifier has no .value attribute; use its dotted-string form
                return ([eku.dotted_string for eku in expected_ext_keyusage],
                        [eku.dotted_string for eku in current_ext_keyusage])
except cryptography.x509.ExtensionNotFound:
# This is only bad if the user specified a non-empty list
if self.extended_key_usage:
return NO_EXTENSION
def _validate_subject_alt_name(self):
try:
current_san = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
expected_san = [cryptography_get_name(san) for san in self.subject_alt_name]
if not compare_sets(expected_san, current_san, self.subject_alt_name_strict):
return self.subject_alt_name, current_san
except cryptography.x509.ExtensionNotFound:
# This is only bad if the user specified a non-empty list
if self.subject_alt_name:
return NO_EXTENSION
def _validate_not_before(self):
return self.cert.not_valid_before
def _validate_not_after(self):
return self.cert.not_valid_after
def _validate_valid_at(self):
rt = get_relative_time_option(self.valid_at, 'valid_at', backend=self.backend)
return self.cert.not_valid_before, rt, self.cert.not_valid_after
def _validate_invalid_at(self):
rt = get_relative_time_option(self.invalid_at, 'invalid_at', backend=self.backend)
return self.cert.not_valid_before, rt, self.cert.not_valid_after
def _validate_valid_in(self):
valid_in_date = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
return self.cert.not_valid_before, valid_in_date, self.cert.not_valid_after
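# Illustrative sketch of how _validate_subject() above materializes the
# expected subject for comparison: each (name, value) pair becomes a
# NameAttribute inside a Name. NameOID.COMMON_NAME is used directly here
# instead of the module's cryptography_name_to_oid() lookup, and the value is
# a placeholder.
def _demo_expected_subject_name():
    from cryptography.x509.oid import NameOID
    return Name([NameAttribute(oid=NameOID.COMMON_NAME, value=u'example.com')])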
class AssertOnlyCertificate(AssertOnlyCertificateBase):
"""validate the supplied certificate."""
def __init__(self, module):
super(AssertOnlyCertificate, self).__init__(module, 'pyopenssl')
# Ensure inputs are properly sanitized before comparison.
for param in ['signature_algorithms', 'key_usage', 'extended_key_usage',
'subject_alt_name', 'subject', 'issuer', 'not_before',
'not_after', 'valid_at', 'invalid_at']:
attr = getattr(self, param)
if isinstance(attr, list) and attr:
if isinstance(attr[0], str):
setattr(self, param, [to_bytes(item) for item in attr])
elif isinstance(attr[0], tuple):
setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr])
            elif isinstance(attr, tuple):
                # tuples have no .items(); treat the attribute as a sequence of (key, value) pairs
                setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr))
elif isinstance(attr, dict):
setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
elif isinstance(attr, str):
setattr(self, param, to_bytes(attr))
def _validate_privatekey(self):
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey(self.privatekey)
ctx.use_certificate(self.cert)
try:
ctx.check_privatekey()
return True
except OpenSSL.SSL.Error:
return False
    def _validate_csr_signature(self):
        try:
            self.csr.verify(self.cert.get_pubkey())
            return True
        except OpenSSL.crypto.Error:
            return False
    def _validate_csr_subject(self):
        return self.csr.get_subject() == self.cert.get_subject()
def _validate_csr_extensions(self):
csr_extensions = self.csr.get_extensions()
cert_extension_count = self.cert.get_extension_count()
if len(csr_extensions) != cert_extension_count:
return False
for extension_number in range(0, cert_extension_count):
cert_extension = self.cert.get_extension(extension_number)
            csr_extension = [extension for extension in csr_extensions
                             if extension.get_short_name() == cert_extension.get_short_name()]
            # Guard against a missing counterpart instead of raising IndexError
            if not csr_extension or cert_extension.get_data() != csr_extension[0].get_data():
                return False
return True
def _validate_signature_algorithms(self):
if self.cert.get_signature_algorithm() not in self.signature_algorithms:
return self.cert.get_signature_algorithm()
def _validate_subject(self):
expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject]
cert_subject = self.cert.get_subject().get_components()
current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject]
if not compare_sets(expected_subject, current_subject, self.subject_strict):
return expected_subject, current_subject
def _validate_issuer(self):
expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer]
cert_issuer = self.cert.get_issuer().get_components()
current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer]
if not compare_sets(expected_issuer, current_issuer, self.issuer_strict):
return self.issuer, cert_issuer
def _validate_has_expired(self):
        # The following 3 lines are the same as the current pyOpenSSL code for cert.has_expired().
        # Older versions of pyOpenSSL have a buggy implementation; to avoid issues with those,
        # the code from a more recent release is inlined here.
time_string = to_native(self.cert.get_notAfter())
not_after = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
cert_expired = not_after < datetime.datetime.utcnow()
return cert_expired
def _validate_version(self):
# Version numbers in certs are off by one:
# v1: 0, v2: 1, v3: 2 ...
return self.cert.get_version() + 1
def _validate_key_usage(self):
found = False
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'keyUsage':
found = True
expected_extension = crypto.X509Extension(b"keyUsage", False, b', '.join(self.key_usage))
key_usage = [usage.strip() for usage in to_text(expected_extension, errors='surrogate_or_strict').split(',')]
current_ku = [usage.strip() for usage in to_text(extension, errors='surrogate_or_strict').split(',')]
if not compare_sets(key_usage, current_ku, self.key_usage_strict):
return self.key_usage, str(extension).split(', ')
if not found:
# This is only bad if the user specified a non-empty list
if self.key_usage:
return NO_EXTENSION
def _validate_extended_key_usage(self):
found = False
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'extendedKeyUsage':
found = True
extKeyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.extended_key_usage]
current_xku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
to_bytes(extension, errors='surrogate_or_strict').split(b',')]
if not compare_sets(extKeyUsage, current_xku, self.extended_key_usage_strict):
return self.extended_key_usage, str(extension).split(', ')
if not found:
# This is only bad if the user specified a non-empty list
if self.extended_key_usage:
return NO_EXTENSION
def _validate_subject_alt_name(self):
found = False
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'subjectAltName':
found = True
l_altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
to_text(extension, errors='surrogate_or_strict').split(', ')]
sans = [pyopenssl_normalize_name_attribute(to_text(san, errors='surrogate_or_strict')) for san in self.subject_alt_name]
if not compare_sets(sans, l_altnames, self.subject_alt_name_strict):
return self.subject_alt_name, l_altnames
if not found:
# This is only bad if the user specified a non-empty list
if self.subject_alt_name:
return NO_EXTENSION
def _validate_not_before(self):
return self.cert.get_notBefore()
def _validate_not_after(self):
return self.cert.get_notAfter()
def _validate_valid_at(self):
rt = get_relative_time_option(self.valid_at, "valid_at", backend=self.backend)
rt = to_bytes(rt, errors='surrogate_or_strict')
return self.cert.get_notBefore(), rt, self.cert.get_notAfter()
def _validate_invalid_at(self):
rt = get_relative_time_option(self.invalid_at, "invalid_at", backend=self.backend)
rt = to_bytes(rt, errors='surrogate_or_strict')
return self.cert.get_notBefore(), rt, self.cert.get_notAfter()
def _validate_valid_in(self):
valid_in_asn1 = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
valid_in_date = to_bytes(valid_in_asn1, errors='surrogate_or_strict')
return self.cert.get_notBefore(), valid_in_date, self.cert.get_notAfter()
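# Sketch: pyOpenSSL exposes notBefore/notAfter as ASN.1 GENERALIZEDTIME byte
# strings of the form "YYYYMMDDHHMMSSZ", which is why the pyopenssl validators
# above compare (byte) strings rather than datetime objects. The format
# round-trips cleanly through strftime/strptime:
def _demo_asn1_generalizedtime_roundtrip():
    ts = datetime.datetime(2030, 1, 2, 3, 4, 5)
    asn1 = ts.strftime('%Y%m%d%H%M%SZ')   # '20300102030405Z'
    assert datetime.datetime.strptime(asn1, '%Y%m%d%H%M%SZ') == ts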
class EntrustCertificate(Certificate):
"""Retrieve a certificate using Entrust (ECS)."""
def __init__(self, module, backend):
super(EntrustCertificate, self).__init__(module, backend)
self.trackingId = None
self.notAfter = get_relative_time_option(module.params['entrust_not_after'], 'entrust_not_after', backend=self.backend)
        if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend,
)
        # The ECS API defaults to using the validated organization tied to the account.
        # We always want to use the organization provided in the CSR instead,
        # so we need to parse the organization out of the CSR.
self.csr_org = None
if self.backend == 'pyopenssl':
csr_subject = self.csr.get_subject()
csr_subject_components = csr_subject.get_components()
for k, v in csr_subject_components:
if k.upper() == 'O':
# Entrust does not support multiple validated organizations in a single certificate
if self.csr_org is not None:
module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
"Subject DN: '{0}'. ".format(csr_subject)))
else:
self.csr_org = v
elif self.backend == 'cryptography':
csr_subject_orgs = self.csr.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
if len(csr_subject_orgs) == 1:
self.csr_org = csr_subject_orgs[0].value
elif len(csr_subject_orgs) > 1:
module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
"Subject DN: '{0}'. ".format(self.csr.subject)))
# If no organization in the CSR, explicitly tell ECS that it should be blank in issued cert, not defaulted to
# organization tied to the account.
if self.csr_org is None:
self.csr_org = ''
try:
self.ecs_client = ECSClient(
entrust_api_user=module.params.get('entrust_api_user'),
entrust_api_key=module.params.get('entrust_api_key'),
entrust_api_cert=module.params.get('entrust_api_client_cert_path'),
entrust_api_cert_key=module.params.get('entrust_api_client_cert_key_path'),
entrust_api_specification_path=module.params.get('entrust_api_specification_path')
)
except SessionConfigurationException as e:
module.fail_json(msg='Failed to initialize Entrust Provider: {0}'.format(to_native(e.message)))
def generate(self, module):
if not self.check(module, perms_required=False) or self.force:
# Read the CSR that was generated for us
body = {}
if self.csr_content is not None:
body['csr'] = self.csr_content
else:
with open(self.csr_path, 'r') as csr_file:
body['csr'] = csr_file.read()
body['certType'] = module.params['entrust_cert_type']
            # Handle expiration (365 days if not specified)
expiry = self.notAfter
if not expiry:
gmt_now = datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))
expiry = gmt_now + datetime.timedelta(days=365)
expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
body['certExpiryDate'] = expiry_iso3339
body['org'] = self.csr_org
body['tracking'] = {
'requesterName': module.params['entrust_requester_name'],
'requesterEmail': module.params['entrust_requester_email'],
'requesterPhone': module.params['entrust_requester_phone'],
}
try:
result = self.ecs_client.NewCertRequest(Body=body)
self.trackingId = result.get('trackingId')
except RestOperationException as e:
module.fail_json(msg='Failed to request new certificate from Entrust Certificate Services (ECS): {0}'.format(to_native(e.message)))
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, to_bytes(result.get('endEntityCert')))
self.cert = load_certificate(self.path, backend=self.backend)
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
parent_check = super(EntrustCertificate, self).check(module, perms_required)
try:
cert_details = self._get_cert_details()
except RestOperationException as e:
module.fail_json(msg='Failed to get status of existing certificate from Entrust Certificate Services (ECS): {0}.'.format(to_native(e.message)))
# Always issue a new certificate if the certificate is expired, suspended or revoked
status = cert_details.get('status', False)
if status == 'EXPIRED' or status == 'SUSPENDED' or status == 'REVOKED':
return False
# If the requested cert type was specified and it is for a different certificate type than the initial certificate, a new one is needed
if module.params['entrust_cert_type'] and cert_details.get('certType') and module.params['entrust_cert_type'] != cert_details.get('certType'):
return False
return parent_check
def _get_cert_details(self):
cert_details = {}
if self.cert:
serial_number = None
expiry = None
if self.backend == 'pyopenssl':
serial_number = "{0:X}".format(self.cert.get_serial_number())
time_string = to_native(self.cert.get_notAfter())
expiry = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
elif self.backend == 'cryptography':
serial_number = "{0:X}".format(cryptography_serial_number_of_cert(self.cert))
expiry = self.cert.not_valid_after
# get some information about the expiry of this certificate
expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
cert_details['expiresAfter'] = expiry_iso3339
# If a trackingId is not already defined (from the result of a generate)
# use the serial number to identify the tracking Id
if self.trackingId is None and serial_number is not None:
cert_results = self.ecs_client.GetCertificates(serialNumber=serial_number).get('certificates', {})
                # Finding zero or more than one result is very unlikely; it simply means we cannot
                # perform additional checks on the 'state' as returned by Entrust Certificate
                # Services (ECS). The general certificate validity is still checked as it is in
                # the rest of the module.
if len(cert_results) == 1:
self.trackingId = cert_results[0].get('trackingId')
if self.trackingId is not None:
cert_details.update(self.ecs_client.GetCertificate(trackingId=self.trackingId))
return cert_details
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
result.update(self._get_cert_details())
return result
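# Sketch of the expiry timestamp format EntrustCertificate sends to and reads
# back from the ECS API (see generate() and _get_cert_details() above): a UTC
# datetime rendered with a fixed '.00Z' fractional-seconds suffix.
def _demo_ecs_expiry_format():
    expiry = datetime.datetime(2031, 6, 30, 12, 0, 0)
    assert expiry.strftime('%Y-%m-%dT%H:%M:%S.00Z') == '2031-06-30T12:00:00.00Z'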
class AcmeCertificate(Certificate):
"""Retrieve a certificate using the ACME protocol."""
    # Since the backend is not really used for anything other than self.check(),
    # we simply pass it through to the constructor.
def __init__(self, module, backend):
super(AcmeCertificate, self).__init__(module, backend)
self.accountkey_path = module.params['acme_accountkey_path']
self.challenge_path = module.params['acme_challenge_path']
self.use_chain = module.params['acme_chain']
self.acme_directory = module.params['acme_directory']
def generate(self, module):
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not os.path.exists(self.accountkey_path):
raise CertificateError(
'The account key %s does not exist' % self.accountkey_path
)
if not os.path.exists(self.challenge_path):
raise CertificateError(
'The challenge path %s does not exist' % self.challenge_path
)
if not self.check(module, perms_required=False) or self.force:
acme_tiny_path = self.module.get_bin_path('acme-tiny', required=True)
command = [acme_tiny_path]
if self.use_chain:
command.append('--chain')
command.extend(['--account-key', self.accountkey_path])
if self.csr_content is not None:
# We need to temporarily write the CSR to disk
fd, tmpsrc = tempfile.mkstemp()
module.add_cleanup_file(tmpsrc) # Ansible will delete the file on exit
f = os.fdopen(fd, 'wb')
try:
f.write(self.csr_content)
except Exception as err:
try:
f.close()
except Exception as dummy:
pass
module.fail_json(
msg="failed to create temporary CSR file: %s" % to_native(err),
exception=traceback.format_exc()
)
f.close()
command.extend(['--csr', tmpsrc])
else:
command.extend(['--csr', self.csr_path])
command.extend(['--acme-dir', self.challenge_path])
command.extend(['--directory-url', self.acme_directory])
try:
crt = module.run_command(command, check_rc=True)[1]
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, to_bytes(crt))
self.changed = True
except OSError as exc:
raise CertificateError(exc)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'accountkey': self.accountkey_path,
'csr': self.csr_path,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
return result
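# Illustrative only: the shape of the acme-tiny command line assembled by
# AcmeCertificate.generate() above for a CSR given by path, with acme_chain
# enabled. All paths are hypothetical placeholders.
def _demo_acme_tiny_command():
    return ['acme-tiny',
            '--chain',
            '--account-key', '/etc/ssl/private/account.key',
            '--csr', '/etc/ssl/csr/example.com.csr',
            '--acme-dir', '/var/www/challenges',
            '--directory-url', 'https://acme-v02.api.letsencrypt.org/directory']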
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
path=dict(type='path', required=True),
provider=dict(type='str', choices=['acme', 'assertonly', 'entrust', 'ownca', 'selfsigned']),
        force=dict(type='bool', default=False),
csr_path=dict(type='path'),
csr_content=dict(type='str'),
backup=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
return_content=dict(type='bool', default=False),
# General properties of a certificate
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str', no_log=True),
privatekey_passphrase=dict(type='str', no_log=True),
# provider: assertonly
signature_algorithms=dict(type='list', elements='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
issuer=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
issuer_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
has_expired=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
version=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
key_usage=dict(type='list', elements='str', aliases=['keyUsage'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
key_usage_strict=dict(type='bool', default=False, aliases=['keyUsage_strict'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
extended_key_usage=dict(type='list', elements='str', aliases=['extendedKeyUsage'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
extended_key_usage_strict=dict(type='bool', default=False, aliases=['extendedKeyUsage_strict'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
subject_alt_name_strict=dict(type='bool', default=False, aliases=['subjectAltName_strict'],
removed_in_version='2.0.0', removed_from_collection='community.crypto'),
not_before=dict(type='str', aliases=['notBefore'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
not_after=dict(type='str', aliases=['notAfter'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
valid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
invalid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
valid_in=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
# provider: selfsigned
selfsigned_version=dict(type='int', default=3),
selfsigned_digest=dict(type='str', default='sha256'),
selfsigned_not_before=dict(type='str', default='+0s', aliases=['selfsigned_notBefore']),
selfsigned_not_after=dict(type='str', default='+3650d', aliases=['selfsigned_notAfter']),
selfsigned_create_subject_key_identifier=dict(
type='str',
default='create_if_not_provided',
choices=['create_if_not_provided', 'always_create', 'never_create']
),
# provider: ownca
ownca_path=dict(type='path'),
ownca_content=dict(type='str'),
ownca_privatekey_path=dict(type='path'),
ownca_privatekey_content=dict(type='str', no_log=True),
ownca_privatekey_passphrase=dict(type='str', no_log=True),
ownca_digest=dict(type='str', default='sha256'),
ownca_version=dict(type='int', default=3),
ownca_not_before=dict(type='str', default='+0s'),
ownca_not_after=dict(type='str', default='+3650d'),
ownca_create_subject_key_identifier=dict(
type='str',
default='create_if_not_provided',
choices=['create_if_not_provided', 'always_create', 'never_create']
),
ownca_create_authority_key_identifier=dict(type='bool', default=True),
# provider: acme
acme_accountkey_path=dict(type='path'),
acme_challenge_path=dict(type='path'),
acme_chain=dict(type='bool', default=False),
acme_directory=dict(type='str', default="https://acme-v02.api.letsencrypt.org/directory"),
# provider: entrust
entrust_cert_type=dict(type='str', default='STANDARD_SSL',
choices=['STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL',
'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT']),
entrust_requester_email=dict(type='str'),
entrust_requester_name=dict(type='str'),
entrust_requester_phone=dict(type='str'),
entrust_api_user=dict(type='str'),
entrust_api_key=dict(type='str', no_log=True),
entrust_api_client_cert_path=dict(type='path'),
entrust_api_client_cert_key_path=dict(type='path', no_log=True),
entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
entrust_not_after=dict(type='str', default='+365d'),
),
supports_check_mode=True,
add_file_common_args=True,
required_if=[
['state', 'present', ['provider']],
['provider', 'entrust', ['entrust_requester_email', 'entrust_requester_name', 'entrust_requester_phone',
'entrust_api_user', 'entrust_api_key', 'entrust_api_client_cert_path',
'entrust_api_client_cert_key_path']],
],
mutually_exclusive=[
['csr_path', 'csr_content'],
['privatekey_path', 'privatekey_content'],
['ownca_path', 'ownca_content'],
['ownca_privatekey_path', 'ownca_privatekey_content'],
],
)
if module._name == 'community.crypto.openssl_certificate':
module.deprecate("The 'community.crypto.openssl_certificate' module has been renamed to 'community.crypto.x509_certificate'",
version='2.0.0', collection_name='community.crypto')
try:
if module.params['state'] == 'absent':
certificate = CertificateAbsent(module)
else:
if module.params['provider'] != 'assertonly' and module.params['csr_path'] is None and module.params['csr_content'] is None:
module.fail_json(msg='csr_path or csr_content is required when provider is not assertonly')
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
                msg='The directory %s does not exist or the path is not a directory' % base_dir
)
provider = module.params['provider']
if provider == 'assertonly':
module.deprecate("The 'assertonly' provider is deprecated; please see the examples of "
"the 'x509_certificate' module on how to replace it with other modules",
version='2.0.0', collection_name='community.crypto')
elif provider == 'selfsigned':
if module.params['privatekey_path'] is None and module.params['privatekey_content'] is None:
module.fail_json(msg='One of privatekey_path and privatekey_content must be specified for the selfsigned provider.')
elif provider == 'acme':
if module.params['acme_accountkey_path'] is None:
module.fail_json(msg='The acme_accountkey_path option must be specified for the acme provider.')
if module.params['acme_challenge_path'] is None:
module.fail_json(msg='The acme_challenge_path option must be specified for the acme provider.')
elif provider == 'ownca':
if module.params['ownca_path'] is None and module.params['ownca_content'] is None:
module.fail_json(msg='One of ownca_path and ownca_content must be specified for the ownca provider.')
if module.params['ownca_privatekey_path'] is None and module.params['ownca_privatekey_content'] is None:
module.fail_json(msg='One of ownca_privatekey_path and ownca_privatekey_content must be specified for the ownca provider.')
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what backend we can use
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# If cryptography is available we'll use it
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
module.warn('crypto backend forced to pyopenssl. The cryptography library does not support v2 certificates')
backend = 'pyopenssl'
# Fail if no backend has been found
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
if module.params['provider'] in ['selfsigned', 'ownca', 'assertonly']:
try:
getattr(crypto.X509Req, 'get_extensions')
except AttributeError:
module.fail_json(msg='You need to have PyOpenSSL>=0.15')
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
if provider == 'selfsigned':
certificate = SelfSignedCertificate(module)
elif provider == 'acme':
certificate = AcmeCertificate(module, 'pyopenssl')
elif provider == 'ownca':
certificate = OwnCACertificate(module)
elif provider == 'entrust':
certificate = EntrustCertificate(module, 'pyopenssl')
else:
certificate = AssertOnlyCertificate(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
module.fail_json(msg='The cryptography backend does not support v2 certificates, '
'use select_crypto_backend=pyopenssl for v2 certificates')
if provider == 'selfsigned':
certificate = SelfSignedCertificateCryptography(module)
elif provider == 'acme':
certificate = AcmeCertificate(module, 'cryptography')
elif provider == 'ownca':
certificate = OwnCACertificateCryptography(module)
elif provider == 'entrust':
certificate = EntrustCertificate(module, 'cryptography')
else:
certificate = AssertOnlyCertificateCryptography(module)
if module.params['state'] == 'present':
if module.check_mode:
result = certificate.dump(check_mode=True)
result['changed'] = module.params['force'] or not certificate.check(module)
module.exit_json(**result)
certificate.generate(module)
else:
if module.check_mode:
result = certificate.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
certificate.remove(module)
result = certificate.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_6 |
crossvul-python_data_good_4295_2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey
short_description: Generate OpenSSL private keys
description:
- This module allows one to (re)generate OpenSSL private keys.
- One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29),
L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
- Keys are generated in PEM format.
- "Please note that the module regenerates private keys if they don't match
the module's options. In particular, if you provide another passphrase
(or specify none), change the keysize, etc., the private key will be
regenerated. If you are concerned that this could **overwrite your private key**,
consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
size:
description:
- Size (in bits) of the TLS/SSL key to generate.
type: int
default: 4096
type:
description:
- The algorithm used to generate the TLS/SSL private key.
- Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
I(curve) option.
type: str
default: RSA
choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
curve:
description:
- Note that not all curves are supported by all versions of C(cryptography).
- For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
- We use the curve names as defined in the
L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
type: str
choices:
- secp384r1
- secp521r1
- secp224r1
- secp192r1
- secp256r1
- secp256k1
- brainpoolP256r1
- brainpoolP384r1
- brainpoolP512r1
- sect571k1
- sect409k1
- sect283k1
- sect233k1
- sect163k1
- sect571r1
- sect409r1
- sect283r1
- sect233r1
- sect163r2
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
path:
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
cipher:
description:
- The cipher to encrypt the private key. (Valid values can be found by
running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
depending on your OpenSSL version.)
- When using the C(cryptography) backend, use C(auto).
type: str
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
format:
description:
- Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
is used for all keys which support it. Please note that not every key can be exported in any format.
            - The value C(auto) selects a format based on the key type. The value C(auto_ignore) does the same,
              but for existing private key files, it will not force regeneration when their format is not the automatically
              selected one for generation.
- Note that if the format for an existing private key mismatches, the key is *regenerated* by default.
To change this behavior, use the I(format_mismatch) option.
- The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
fail if a value different from C(auto_ignore) is used.
type: str
default: auto_ignore
choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
version_added: '1.0.0'
format_mismatch:
description:
- Determines behavior of the module if the format of a private key does not match the expected format, but all
other parameters are as expected.
- If set to C(regenerate) (default), generates a new private key.
- If set to C(convert), the key will be converted to the new format instead.
- Only supported by the C(cryptography) backend.
type: str
default: regenerate
choices: [ regenerate, convert ]
version_added: '1.0.0'
backup:
description:
- Create a backup file including a timestamp so you can get
the original private key back if you overwrote it with a new one by accident.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) private key's content as I(privatekey).
- Note that especially if the private key is not encrypted, you have to make sure that the returned
value is treated appropriately and not accidentally written to logs etc.! Use with care!
- Use Ansible's I(no_log) task option to avoid the output being shown. See also
U(https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-keep-secret-data-in-my-playbook).
type: bool
default: no
version_added: '1.0.0'
regenerate:
description:
            - Allows configuring the situations in which the module is allowed to regenerate private keys.
The module will always generate a new key if the destination file does not exist.
- By default, the key will be regenerated when it doesn't match the module's options,
except when the key cannot be read or the passphrase does not match. Please note that
this B(changed) for Ansible 2.10. For Ansible 2.9, the behavior was as if C(full_idempotence)
is specified.
- If set to C(never), the module will fail if the key cannot be read or the passphrase
isn't matching, and will never regenerate an existing key.
- If set to C(fail), the module will fail if the key does not correspond to the module's
options.
- If set to C(partial_idempotence), the key will be regenerated if it does not conform to
the module's options. The key is B(not) regenerated if it cannot be read (broken file),
              the key is protected by an unknown passphrase, or when the key is not protected by a
passphrase, but a passphrase is specified.
- If set to C(full_idempotence), the key will be regenerated if it does not conform to the
module's options. This is also the case if the key cannot be read (broken file), the key
              is protected by an unknown passphrase, or when the key is not protected by a passphrase,
but a passphrase is specified. Make sure you have a B(backup) when using this option!
- If set to C(always), the module will always regenerate the key. This is equivalent to
setting I(force) to C(yes).
- Note that if I(format_mismatch) is set to C(convert) and everything matches except the
format, the key will always be converted, except if I(regenerate) is set to C(always).
type: str
choices:
- never
- fail
- partial_idempotence
- full_idempotence
- always
default: full_idempotence
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
- name: Generate an OpenSSL private key with a different size (2048 bits)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
- name: Force regenerate an OpenSSL private key if it already exists
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Generate an OpenSSL private key with a different algorithm (DSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
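# Illustrative addition (not part of the original examples): an ECC key.
# This assumes the cryptography backend, which is required for I(type=ECC).
- name: Generate an OpenSSL private key with elliptic curve cryptography (ECC)
  community.crypto.openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    type: ECC
    curve: secp256r1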
'''
RETURN = r'''
size:
description: Size (in bits) of the TLS/SSL private key.
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key.
returned: changed or success
type: str
sample: RSA
curve:
description: Elliptic curve used to generate the TLS/SSL private key.
returned: changed or success, and I(type) is C(ECC)
type: str
sample: secp256r1
filename:
description: Path to the generated TLS/SSL private key file.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
- The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/privatekey.pem.2019-03-09@11:22~
privatekey:
description:
- The (current or generated) private key's content.
- Will be Base64-encoded if the key is in raw format.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X25519_FULL,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_private_key_format,
)
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.exceptions
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.dsa
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.utils
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class PrivateKeyError(OpenSSLObjectError):
pass
class PrivateKeyBase(OpenSSLObject):
def __init__(self, module):
super(PrivateKeyBase, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.size = module.params['size']
self.passphrase = module.params['passphrase']
self.cipher = module.params['cipher']
self.privatekey = None
self.fingerprint = {}
self.format = module.params['format']
self.format_mismatch = module.params['format_mismatch']
self.privatekey_bytes = None
self.return_content = module.params['return_content']
self.regenerate = module.params['regenerate']
if self.regenerate == 'always':
self.force = True
self.backup = module.params['backup']
self.backup_file = None
if module.params['mode'] is None:
module.params['mode'] = '0600'
@abc.abstractmethod
def _generate_private_key(self):
"""(Re-)Generate private key."""
pass
@abc.abstractmethod
def _ensure_private_key_loaded(self):
"""Make sure that the private key has been loaded."""
pass
@abc.abstractmethod
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
pass
@abc.abstractmethod
def _get_fingerprint(self):
pass
def generate(self, module):
"""Generate a keypair."""
if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
# Regenerate
if self.backup:
self.backup_file = module.backup_local(self.path)
self._generate_private_key()
privatekey_data = self._get_private_key_data()
if self.return_content:
self.privatekey_bytes = privatekey_data
write_file(module, privatekey_data, 0o600)
self.changed = True
elif not self.check(module, perms_required=False, ignore_conversion=False):
# Convert
if self.backup:
self.backup_file = module.backup_local(self.path)
self._ensure_private_key_loaded()
privatekey_data = self._get_private_key_data()
if self.return_content:
self.privatekey_bytes = privatekey_data
write_file(module, privatekey_data, 0o600)
self.changed = True
self.fingerprint = self._get_fingerprint()
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(PrivateKeyBase, self).remove(module)
@abc.abstractmethod
def _check_passphrase(self):
pass
@abc.abstractmethod
def _check_size_and_type(self):
pass
@abc.abstractmethod
def _check_format(self):
pass
def check(self, module, perms_required=True, ignore_conversion=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PrivateKeyBase, self).check(module, perms_required=False)
if not state_and_perms:
# key does not exist
return False
if not self._check_passphrase():
if self.regenerate in ('full_idempotence', 'always'):
return False
            module.fail_json(msg='Unable to read the key. The key is protected with another passphrase, has no passphrase, or is broken.'
                                 ' Will not proceed. To force regeneration, call the module with `regenerate`'
                                 ' set to `full_idempotence` or `always`, or with `force=yes`.')
if self.regenerate != 'never':
if not self._check_size_and_type():
if self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
return False
module.fail_json(msg='Key has wrong type and/or size.'
                                 ' Will not proceed. To force regeneration, call the module with `regenerate`'
' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')
if not self._check_format():
# During conversion step, convert if format does not match and format_mismatch == 'convert'
if not ignore_conversion and self.format_mismatch == 'convert':
return False
# During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
if ignore_conversion and self.format_mismatch == 'regenerate' and self.regenerate != 'never':
if not ignore_conversion or self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
return False
module.fail_json(msg='Key has wrong format.'
                                 ' Will not proceed. To force regeneration, call the module with `regenerate`'
' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
' To convert the key, set `format_mismatch` to `convert`.')
# check whether permissions are correct (in case that needs to be checked)
return not perms_required or super(PrivateKeyBase, self).check(module, perms_required=perms_required)
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'size': self.size,
'filename': self.path,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.backup_file:
result['backup_file'] = self.backup_file
if self.return_content:
if self.privatekey_bytes is None:
self.privatekey_bytes = load_file_if_exists(self.path, ignore_errors=True)
if self.privatekey_bytes:
if identify_private_key_format(self.privatekey_bytes) == 'raw':
result['privatekey'] = base64.b64encode(self.privatekey_bytes)
else:
result['privatekey'] = self.privatekey_bytes.decode('utf-8')
else:
result['privatekey'] = None
return result
# Implementation with using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):
def __init__(self, module):
super(PrivateKeyPyOpenSSL, self).__init__(module)
if module.params['type'] == 'RSA':
self.type = crypto.TYPE_RSA
elif module.params['type'] == 'DSA':
self.type = crypto.TYPE_DSA
else:
module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
if self.format != 'auto_ignore':
module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")
def _generate_private_key(self):
"""(Re-)Generate private key."""
self.privatekey = crypto.PKey()
try:
self.privatekey.generate_key(self.type, self.size)
except (TypeError, ValueError) as exc:
raise PrivateKeyError(exc)
def _ensure_private_key_loaded(self):
"""Make sure that the private key has been loaded."""
if self.privatekey is None:
try:
                self.privatekey = load_privatekey(self.path, self.passphrase)
except OpenSSLBadPassphraseError as exc:
raise PrivateKeyError(exc)
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
if self.cipher and self.passphrase:
return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
self.cipher, to_bytes(self.passphrase))
else:
return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)
def _get_fingerprint(self):
return get_fingerprint(self.path, self.passphrase)
def _check_passphrase(self):
try:
load_privatekey(self.path, self.passphrase)
return True
except Exception as dummy:
return False
def _check_size_and_type(self):
def _check_size(privatekey):
return self.size == privatekey.bits()
def _check_type(privatekey):
return self.type == privatekey.type()
self._ensure_private_key_loaded()
return _check_size(self.privatekey) and _check_type(self.privatekey)
def _check_format(self):
# Not supported by this backend
return True
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyPyOpenSSL, self).dump()
if self.type == crypto.TYPE_RSA:
result['type'] = 'RSA'
else:
result['type'] = 'DSA'
return result
# Implementation with using cryptography
class PrivateKeyCryptography(PrivateKeyBase):
def _get_ec_class(self, ectype):
ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
if ecclass is None:
self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
return ecclass
def _add_curve(self, name, ectype, deprecated=False):
def create(size):
ecclass = self._get_ec_class(ectype)
return ecclass()
def verify(privatekey):
ecclass = self._get_ec_class(ectype)
return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
self.curves[name] = {
'create': create,
'verify': verify,
'deprecated': deprecated,
}
def __init__(self, module):
super(PrivateKeyCryptography, self).__init__(module)
self.curves = dict()
self._add_curve('secp384r1', 'SECP384R1')
self._add_curve('secp521r1', 'SECP521R1')
self._add_curve('secp224r1', 'SECP224R1')
self._add_curve('secp192r1', 'SECP192R1')
self._add_curve('secp256r1', 'SECP256R1')
self._add_curve('secp256k1', 'SECP256K1')
self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
self.module = module
self.cryptography_backend = cryptography.hazmat.backends.default_backend()
self.type = module.params['type']
self.curve = module.params['curve']
if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519')
if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.module.fail_json(msg='Your cryptography version does not support X448')
if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.module.fail_json(msg='Your cryptography version does not support Ed25519')
if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.module.fail_json(msg='Your cryptography version does not support Ed448')
def _get_wanted_format(self):
if self.format not in ('auto', 'auto_ignore'):
return self.format
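        # The "traditional" PKCS#1-style serialization does not exist for
        # X25519/X448/Ed25519/Ed448 keys, so default those types to PKCS#8.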
if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
return 'pkcs8'
else:
return 'pkcs1'
def _generate_private_key(self):
"""(Re-)Generate private key."""
try:
if self.type == 'RSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537, # OpenSSL always uses this
key_size=self.size,
backend=self.cryptography_backend
)
if self.type == 'DSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
key_size=self.size,
backend=self.cryptography_backend
)
if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
if self.type == 'ECC' and self.curve in self.curves:
if self.curves[self.curve]['deprecated']:
self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
curve=self.curves[self.curve]['create'](self.size),
backend=self.cryptography_backend
)
except cryptography.exceptions.UnsupportedAlgorithm as dummy:
self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
def _ensure_private_key_loaded(self):
"""Make sure that the private key has been loaded."""
if self.privatekey is None:
self.privatekey = self._load_privatekey()
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
# Select export format and encoding
try:
export_format = self._get_wanted_format()
export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
if export_format == 'pkcs1':
# "TraditionalOpenSSL" format is PKCS1
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
elif export_format == 'pkcs8':
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
elif export_format == 'raw':
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
except AttributeError:
self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
# Select key encryption
encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
if self.cipher and self.passphrase:
if self.cipher == 'auto':
encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
else:
self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
# Serialize key
try:
return self.privatekey.private_bytes(
encoding=export_encoding,
format=export_format,
encryption_algorithm=encryption_algorithm
)
except ValueError as dummy:
self.module.fail_json(
msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
)
except Exception as dummy:
self.module.fail_json(
msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
exception=traceback.format_exc()
)
def _load_privatekey(self):
try:
# Read bytes
with open(self.path, 'rb') as f:
data = f.read()
# Interpret bytes depending on format.
format = identify_private_key_format(data)
if format == 'raw':
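                # Raw keys carry no type metadata, so the algorithm is inferred from
                # the key length: X448 private keys are 56 bytes, Ed448 keys are 57,
                # and X25519/Ed25519 keys are both 32 bytes (hence the extra checks).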
if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
if len(data) == 32:
if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
try:
return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
except Exception:
return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
raise PrivateKeyError('Cannot load raw key')
else:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data,
None if self.passphrase is None else to_bytes(self.passphrase),
backend=self.cryptography_backend
)
except Exception as e:
raise PrivateKeyError(e)
def _get_fingerprint(self):
# Get bytes of public key
private_key = self._load_privatekey()
public_key = private_key.public_key()
public_key_bytes = public_key.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.DER,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
# Get fingerprints of public_key_bytes
return get_fingerprint_of_bytes(public_key_bytes)
def _check_passphrase(self):
try:
with open(self.path, 'rb') as f:
data = f.read()
format = identify_private_key_format(data)
if format == 'raw':
# Raw keys cannot be encrypted. To avoid incompatibilities, we try to
# actually load the key (and return False when this fails).
self._load_privatekey()
# Loading the key succeeded. Only return True when no passphrase was
# provided.
return self.passphrase is None
else:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data,
None if self.passphrase is None else to_bytes(self.passphrase),
backend=self.cryptography_backend
)
except Exception as dummy:
return False
def _check_size_and_type(self):
self._ensure_private_key_loaded()
if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
return self.type == 'RSA' and self.size == self.privatekey.key_size
if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
return self.type == 'DSA' and self.size == self.privatekey.key_size
if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
return self.type == 'X25519'
if CRYPTOGRAPHY_HAS_X448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
return self.type == 'X448'
if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
return self.type == 'Ed25519'
if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
return self.type == 'Ed448'
if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
if self.type != 'ECC':
return False
if self.curve not in self.curves:
return False
return self.curves[self.curve]['verify'](self.privatekey)
return False
def _check_format(self):
if self.format == 'auto_ignore':
return True
try:
with open(self.path, 'rb') as f:
content = f.read()
format = identify_private_key_format(content)
return format == self._get_wanted_format()
except Exception as dummy:
return False
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyCryptography, self).dump()
result['type'] = self.type
if self.type == 'ECC':
result['curve'] = self.curve
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
size=dict(type='int', default=4096),
type=dict(type='str', default='RSA', choices=[
'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
]),
curve=dict(type='str', choices=[
'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
]),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
passphrase=dict(type='str', no_log=True),
cipher=dict(type='str'),
backup=dict(type='bool', default=False),
format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
return_content=dict(type='bool', default=False),
regenerate=dict(
type='str',
default='full_idempotence',
choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
),
),
supports_check_mode=True,
add_file_common_args=True,
required_together=[
['cipher', 'passphrase']
],
required_if=[
['type', 'ECC', ['curve']],
],
)
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
            msg='The directory %s does not exist or the path is not a directory' % base_dir
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
        # Detect which backends are usable
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # Choose a backend: a specific cipher requires pyOpenSSL, since the
        # cryptography backend only supports cipher=auto
if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
# First try pyOpenSSL, then cryptography
if can_use_pyopenssl:
backend = 'pyopenssl'
elif can_use_cryptography:
backend = 'cryptography'
else:
# First try cryptography, then pyOpenSSL
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
version='2.0.0', collection_name='community.crypto')
private_key = PrivateKeyPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
private_key = PrivateKeyCryptography(module)
if private_key.state == 'present':
if module.check_mode:
result = private_key.dump()
result['changed'] = private_key.force \
or not private_key.check(module, ignore_conversion=True) \
or not private_key.check(module, ignore_conversion=False)
module.exit_json(**result)
private_key.generate(module)
else:
if module.check_mode:
result = private_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
private_key.remove(module)
result = private_key.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
| ./CrossVul/dataset_final_sorted/CWE-116/py/good_4295_2 |
crossvul-python_data_bad_4460_0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
# standard library dependencies
try:
# prefer lxml as it supports XPath
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
from operator import attrgetter
import itertools
from petl.compat import string_types, text_type
# internal dependencies
from petl.util.base import Table
from petl.io.sources import read_source_from_arg
def fromxml(source, *args, **kwargs):
"""
Extract data from an XML file. E.g.::
>>> import petl as etl
>>> # setup a file to demonstrate with
... d = '''<table>
... <tr>
... <td>foo</td><td>bar</td>
... </tr>
... <tr>
... <td>a</td><td>1</td>
... </tr>
... <tr>
... <td>b</td><td>2</td>
... </tr>
... <tr>
... <td>c</td><td>2</td>
... </tr>
... </table>'''
>>> with open('example1.xml', 'w') as f:
... f.write(d)
...
212
>>> table1 = etl.fromxml('example1.xml', 'tr', 'td')
>>> table1
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
If the data values are stored in an attribute, provide the attribute
name as an extra positional argument::
>>> d = '''<table>
... <tr>
... <td v='foo'/><td v='bar'/>
... </tr>
... <tr>
... <td v='a'/><td v='1'/>
... </tr>
... <tr>
... <td v='b'/><td v='2'/>
... </tr>
... <tr>
... <td v='c'/><td v='2'/>
... </tr>
... </table>'''
>>> with open('example2.xml', 'w') as f:
... f.write(d)
...
220
>>> table2 = etl.fromxml('example2.xml', 'tr', 'td', 'v')
>>> table2
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
Data values can also be extracted by providing a mapping of field
names to element paths::
>>> d = '''<table>
... <row>
... <foo>a</foo><baz><bar v='1'/><bar v='3'/></baz>
... </row>
... <row>
... <foo>b</foo><baz><bar v='2'/></baz>
... </row>
... <row>
... <foo>c</foo><baz><bar v='2'/></baz>
... </row>
... </table>'''
>>> with open('example3.xml', 'w') as f:
... f.write(d)
...
223
>>> table3 = etl.fromxml('example3.xml', 'row',
... {'foo': 'foo', 'bar': ('baz/bar', 'v')})
>>> table3
+------------+-----+
| bar | foo |
+============+=====+
| ('1', '3') | 'a' |
+------------+-----+
| '2' | 'b' |
+------------+-----+
| '2' | 'c' |
+------------+-----+
If `lxml <http://lxml.de/>`_ is installed, full XPath expressions can be
used.
Note that the implementation is currently **not** streaming, i.e.,
the whole document is loaded into memory.
If multiple elements match a given field, all values are reported as a
tuple.
If there is more than one element name used for row values, a tuple
or list of paths can be provided, e.g.,
``fromxml('example.html', './/tr', ('th', 'td'))``.
"""
source = read_source_from_arg(source)
return XmlView(source, *args, **kwargs)
class XmlView(Table):
def __init__(self, source, *args, **kwargs):
self.source = source
self.args = args
if len(args) == 2 and isinstance(args[1], (string_types, tuple, list)):
self.rmatch = args[0]
self.vmatch = args[1]
self.vdict = None
self.attr = None
elif len(args) == 2 and isinstance(args[1], dict):
self.rmatch = args[0]
self.vmatch = None
self.vdict = args[1]
self.attr = None
elif len(args) == 3:
self.rmatch = args[0]
self.vmatch = args[1]
self.vdict = None
self.attr = args[2]
else:
assert False, 'bad parameters'
self.missing = kwargs.get('missing', None)
def __iter__(self):
vmatch = self.vmatch
vdict = self.vdict
with self.source.open('rb') as xmlf:
tree = etree.parse(xmlf)
if not hasattr(tree, 'iterfind'):
# Python 2.6 compatibility
tree.iterfind = tree.findall
if vmatch is not None:
# simple case, all value paths are the same
for rowelm in tree.iterfind(self.rmatch):
if self.attr is None:
getv = attrgetter('text')
else:
getv = lambda e: e.get(self.attr)
if isinstance(vmatch, string_types):
# match only one path
velms = rowelm.findall(vmatch)
else:
# match multiple paths
velms = itertools.chain(*[rowelm.findall(enm)
for enm in vmatch])
yield tuple(getv(velm)
for velm in velms)
else:
# difficult case, deal with different paths for each field
# determine output header
flds = tuple(sorted(map(text_type, vdict.keys())))
yield flds
# setup value getters
vmatches = dict()
vgetters = dict()
for f in flds:
vmatch = self.vdict[f]
if isinstance(vmatch, string_types):
# match element path
vmatches[f] = vmatch
vgetters[f] = element_text_getter(self.missing)
else:
# match element path and attribute name
vmatches[f] = vmatch[0]
attr = vmatch[1]
vgetters[f] = attribute_text_getter(attr, self.missing)
# determine data rows
for rowelm in tree.iterfind(self.rmatch):
yield tuple(vgetters[f](rowelm.findall(vmatches[f]))
for f in flds)
def element_text_getter(missing):
def _get(v):
if len(v) > 1:
return tuple(e.text for e in v)
elif len(v) == 1:
return v[0].text
else:
return missing
return _get
def attribute_text_getter(attr, missing):
def _get(v):
if len(v) > 1:
return tuple(e.get(attr) for e in v)
elif len(v) == 1:
return v[0].get(attr)
else:
return missing
return _get
| ./CrossVul/dataset_final_sorted/CWE-91/py/bad_4460_0 |
crossvul-python_data_good_4460_0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
# standard library dependencies
try:
# prefer lxml as it supports XPath
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
from operator import attrgetter
import itertools
from petl.compat import string_types, text_type
# internal dependencies
from petl.util.base import Table
from petl.io.sources import read_source_from_arg
def fromxml(source, *args, **kwargs):
"""
Extract data from an XML file. E.g.::
>>> import petl as etl
>>> # setup a file to demonstrate with
... d = '''<table>
... <tr>
... <td>foo</td><td>bar</td>
... </tr>
... <tr>
... <td>a</td><td>1</td>
... </tr>
... <tr>
... <td>b</td><td>2</td>
... </tr>
... <tr>
... <td>c</td><td>2</td>
... </tr>
... </table>'''
>>> with open('example1.xml', 'w') as f:
... f.write(d)
...
212
>>> table1 = etl.fromxml('example1.xml', 'tr', 'td')
>>> table1
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
If the data values are stored in an attribute, provide the attribute
name as an extra positional argument::
>>> d = '''<table>
... <tr>
... <td v='foo'/><td v='bar'/>
... </tr>
... <tr>
... <td v='a'/><td v='1'/>
... </tr>
... <tr>
... <td v='b'/><td v='2'/>
... </tr>
... <tr>
... <td v='c'/><td v='2'/>
... </tr>
... </table>'''
>>> with open('example2.xml', 'w') as f:
... f.write(d)
...
220
>>> table2 = etl.fromxml('example2.xml', 'tr', 'td', 'v')
>>> table2
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
Data values can also be extracted by providing a mapping of field
names to element paths::
>>> d = '''<table>
... <row>
... <foo>a</foo><baz><bar v='1'/><bar v='3'/></baz>
... </row>
... <row>
... <foo>b</foo><baz><bar v='2'/></baz>
... </row>
... <row>
... <foo>c</foo><baz><bar v='2'/></baz>
... </row>
... </table>'''
>>> with open('example3.xml', 'w') as f:
... f.write(d)
...
223
>>> table3 = etl.fromxml('example3.xml', 'row',
... {'foo': 'foo', 'bar': ('baz/bar', 'v')})
>>> table3
+------------+-----+
| bar | foo |
+============+=====+
| ('1', '3') | 'a' |
+------------+-----+
| '2' | 'b' |
+------------+-----+
| '2' | 'c' |
+------------+-----+
If `lxml <http://lxml.de/>`_ is installed, full XPath expressions can be
used.
Note that the implementation is currently **not** streaming, i.e.,
the whole document is loaded into memory.
If multiple elements match a given field, all values are reported as a
tuple.
If there is more than one element name used for row values, a tuple
or list of paths can be provided, e.g.,
``fromxml('example.html', './/tr', ('th', 'td'))``.
Optionally a custom parser can be provided, e.g.,
``etl.fromxml('example1.xml', 'tr', 'td', parser=my_parser)``.
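    For instance (an illustrative sketch, not part of the original docs, and
    assuming lxml is installed), a hardened parser can be passed in explicitly::
        >>> from lxml import etree as lxml_etree  # doctest: +SKIP
        >>> my_parser = lxml_etree.XMLParser(resolve_entities=False, no_network=True)  # doctest: +SKIP
        >>> table4 = etl.fromxml('example1.xml', 'tr', 'td', parser=my_parser)  # doctest: +SKIP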
"""
source = read_source_from_arg(source)
return XmlView(source, *args, **kwargs)
class XmlView(Table):
def __init__(self, source, *args, **kwargs):
self.source = source
self.args = args
if len(args) == 2 and isinstance(args[1], (string_types, tuple, list)):
self.rmatch = args[0]
self.vmatch = args[1]
self.vdict = None
self.attr = None
elif len(args) == 2 and isinstance(args[1], dict):
self.rmatch = args[0]
self.vmatch = None
self.vdict = args[1]
self.attr = None
elif len(args) == 3:
self.rmatch = args[0]
self.vmatch = args[1]
self.vdict = None
self.attr = args[2]
else:
assert False, 'bad parameters'
self.missing = kwargs.get('missing', None)
self.user_parser = kwargs.get('parser', None)
def __iter__(self):
vmatch = self.vmatch
vdict = self.vdict
with self.source.open('rb') as xmlf:
            parser = _create_xml_parser(self.user_parser)
            tree = etree.parse(xmlf, parser=parser)
if not hasattr(tree, 'iterfind'):
# Python 2.6 compatibility
tree.iterfind = tree.findall
if vmatch is not None:
# simple case, all value paths are the same
for rowelm in tree.iterfind(self.rmatch):
if self.attr is None:
getv = attrgetter('text')
else:
getv = lambda e: e.get(self.attr)
if isinstance(vmatch, string_types):
# match only one path
velms = rowelm.findall(vmatch)
else:
# match multiple paths
velms = itertools.chain(*[rowelm.findall(enm)
for enm in vmatch])
yield tuple(getv(velm)
for velm in velms)
else:
# difficult case, deal with different paths for each field
# determine output header
flds = tuple(sorted(map(text_type, vdict.keys())))
yield flds
# setup value getters
vmatches = dict()
vgetters = dict()
for f in flds:
vmatch = self.vdict[f]
if isinstance(vmatch, string_types):
# match element path
vmatches[f] = vmatch
vgetters[f] = element_text_getter(self.missing)
else:
# match element path and attribute name
vmatches[f] = vmatch[0]
attr = vmatch[1]
vgetters[f] = attribute_text_getter(attr, self.missing)
# determine data rows
for rowelm in tree.iterfind(self.rmatch):
yield tuple(vgetters[f](rowelm.findall(vmatches[f]))
for f in flds)
def _create_xml_parser(user_parser):
if user_parser is not None:
return user_parser
try:
        # Default to an lxml parser with entity resolution disabled (mitigates XXE).
        # Constructing it raises a TypeError when lxml could not be imported, because
        # Python's built-in XML parser does not accept the `resolve_entities` kwarg.
        return etree.XMLParser(resolve_entities=False)
    except TypeError:
        # lxml not available; fall back to the stdlib default parser.
        return None
def element_text_getter(missing):
def _get(v):
if len(v) > 1:
return tuple(e.text for e in v)
elif len(v) == 1:
return v[0].text
else:
return missing
return _get
def attribute_text_getter(attr, missing):
def _get(v):
if len(v) > 1:
return tuple(e.get(attr) for e in v)
elif len(v) == 1:
return v[0].get(attr)
else:
return missing
return _get
| ./CrossVul/dataset_final_sorted/CWE-91/py/good_4460_0 |
crossvul-python_data_good_2494_1 | from __future__ import absolute_import
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import List, Optional, Set, Text
from zerver.decorator import authenticated_json_post_view
from zerver.lib.actions import do_invite_users, do_refer_friend, \
get_default_subs, internal_send_message
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.streams import access_stream_by_name
from zerver.lib.validator import check_string, check_list
from zerver.models import PreregistrationUser, Stream, UserProfile
import re
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile,
invitee_emails_raw=REQ("invitee_emails"),
body=REQ("custom_body", default=None)):
# type: (HttpRequest, UserProfile, str, Optional[str]) -> HttpResponse
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
return json_error(_("Must be a realm administrator"))
if not invitee_emails_raw:
return json_error(_("You must specify at least one email address."))
if body == '':
body = None
invitee_emails = get_invitee_emails_set(invitee_emails_raw)
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error(_("You must specify at least one stream for invitees to join."))
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream # type: Optional[Stream]
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = [] # type: List[Stream]
for stream_name in stream_names:
try:
(stream, recipient, sub) = access_stream_by_name(user_profile, stream_name)
except JsonableError:
return json_error(_("Stream does not exist: %s. No invites were sent.") % (stream_name,))
streams.append(stream)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams, body)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
return json_success()
def get_invitee_emails_set(invitee_emails_raw):
# type: (str) -> Set[str]
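    # Illustrative example (not in the original): an input such as
    #   'Alice <alice@example.com>, bob@example.com\ncarol@example.com'
    # yields {'alice@example.com', 'bob@example.com', 'carol@example.com'}.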
invitee_emails_list = set(re.split(r'[,\n]', invitee_emails_raw))
invitee_emails = set()
for email in invitee_emails_list:
is_email_with_name = re.search(r'<(?P<email>.*)>', email)
if is_email_with_name:
email = is_email_with_name.group('email')
invitee_emails.add(email.strip())
return invitee_emails
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
if not email:
return json_error(_("No email address specified"))
if user_profile.invites_granted - user_profile.invites_used <= 0:
return json_error(_("Insufficient invites"))
do_refer_friend(user_profile, email)
return json_success()
| ./CrossVul/dataset_final_sorted/CWE-862/py/good_2494_1 |
crossvul-python_data_good_3126_0 | """
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
    See the file LICENSE for copying permission
"""
import logging
import slixmpp
from slixmpp.stanza import Message, Iq
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0280 import stanza
log = logging.getLogger(__name__)
class XEP_0280(BasePlugin):
"""
XEP-0280 Message Carbons
"""
name = 'xep_0280'
description = 'XEP-0280: Message Carbons'
dependencies = {'xep_0030', 'xep_0297'}
stanza = stanza
def plugin_init(self):
self.xmpp.register_handler(
Callback('Carbon Received',
StanzaPath('message/carbon_received'),
self._handle_carbon_received))
self.xmpp.register_handler(
Callback('Carbon Sent',
StanzaPath('message/carbon_sent'),
self._handle_carbon_sent))
register_stanza_plugin(Message, stanza.ReceivedCarbon)
register_stanza_plugin(Message, stanza.SentCarbon)
register_stanza_plugin(Message, stanza.PrivateCarbon)
register_stanza_plugin(Iq, stanza.CarbonEnable)
register_stanza_plugin(Iq, stanza.CarbonDisable)
register_stanza_plugin(stanza.ReceivedCarbon,
self.xmpp['xep_0297'].stanza.Forwarded)
register_stanza_plugin(stanza.SentCarbon,
self.xmpp['xep_0297'].stanza.Forwarded)
def plugin_end(self):
self.xmpp.remove_handler('Carbon Received')
self.xmpp.remove_handler('Carbon Sent')
self.xmpp.plugin['xep_0030'].del_feature(feature='urn:xmpp:carbons:2')
def session_bind(self, jid):
self.xmpp.plugin['xep_0030'].add_feature('urn:xmpp:carbons:2')
def _handle_carbon_received(self, msg):
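        # Only accept carbons wrapped by our own bare JID; otherwise any remote
        # entity could spoof forwarded copies of messages.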
if msg['from'].bare == self.xmpp.boundjid.bare:
self.xmpp.event('carbon_received', msg)
def _handle_carbon_sent(self, msg):
if msg['from'].bare == self.xmpp.boundjid.bare:
self.xmpp.event('carbon_sent', msg)
def enable(self, ifrom=None, timeout=None, callback=None,
timeout_callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
iq.enable('carbon_enable')
return iq.send(timeout_callback=timeout_callback, timeout=timeout,
callback=callback)
def disable(self, ifrom=None, timeout=None, callback=None,
timeout_callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
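        # Note: `enable` here activates the stanza plugin named 'carbon_disable',
        # i.e. it attaches the <disable/> payload; it does not turn carbons on.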
iq.enable('carbon_disable')
return iq.send(timeout_callback=timeout_callback, timeout=timeout,
callback=callback)
| ./CrossVul/dataset_final_sorted/CWE-346/py/good_3126_0 |
crossvul-python_data_good_4367_9 | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
from decouple import Csv, config as _config, undefined
def config(option: str, default=undefined, *args, **kwargs):
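    # Illustrative behaviour (not in the original): with FOO="a,b" set in the
    # environment, config("FOO", default=[], split=True) returns ["a", "b"].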
if "split" in kwargs:
kwargs.pop("split")
kwargs["cast"] = Csv()
if default == []:
default = ""
if default is not undefined and default is not None:
kwargs.setdefault("cast", type(default))
return _config(option, default=default, *args, **kwargs)
| ./CrossVul/dataset_final_sorted/CWE-346/py/good_4367_9 |
crossvul-python_data_bad_4367_9 | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
from decouple import Csv, config as _config, undefined
def config(option: str, default=undefined, *args, **kwargs):
if "split" in kwargs:
kwargs.pop("split")
kwargs["cast"] = Csv()
if default is not undefined and default is not None:
kwargs.setdefault("cast", type(default))
return _config(option, default=default, *args, **kwargs)
| ./CrossVul/dataset_final_sorted/CWE-346/py/bad_4367_9 |
crossvul-python_data_bad_4367_7 | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
import factory
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: f"user-{n}")
class Meta:
model = "accounts.User"
class SuperUserFactory(UserFactory):
is_staff = True
is_superuser = True
| ./CrossVul/dataset_final_sorted/CWE-346/py/bad_4367_7 |
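A sketch of what this factory variant gets wrong (assumes a configured Django test environment, e.g. under pytest-django): with no password declaration, a password keyword is written to the model field verbatim instead of being hashed.

user = UserFactory(password="letmein")
assert user.password == "letmein"   # raw string stored on the field
# user.check_password("letmein") will not succeed here, because the
# stored value is not a recognizable password hash.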
crossvul-python_data_bad_4367_8 | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
import datetime
import os
from django.urls import reverse_lazy
import git
import sentry_sdk
from sentry_sdk.integrations import django, redis
# NLX directory urls
from openzaak.config.constants import NLXDirectories
from ...utils.monitoring import filter_sensitive_data
from .api import * # noqa
from .environ import config
from .plugins import PLUGIN_INSTALLED_APPS
# Build paths inside the project, so further paths can be defined relative to
# the code root.
DJANGO_PROJECT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)
BASE_DIR = os.path.abspath(
os.path.join(DJANGO_PROJECT_DIR, os.path.pardir, os.path.pardir)
)
#
# Core Django settings
#
SITE_ID = config("SITE_ID", default=1)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# NEVER run with DEBUG=True in production-like environments
DEBUG = config("DEBUG", default=False)
# = domains we're running on
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default="", split=True)
IS_HTTPS = config("IS_HTTPS", default=not DEBUG)
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "nl-nl"
TIME_ZONE = "UTC" # note: this *may* affect the output of DRF datetimes
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
#
# DATABASE and CACHING setup
#
DATABASES = {
"default": {
"ENGINE": "django.contrib.gis.db.backends.postgis",
"NAME": config("DB_NAME", "openzaak"),
"USER": config("DB_USER", "openzaak"),
"PASSWORD": config("DB_PASSWORD", "openzaak"),
"HOST": config("DB_HOST", "localhost"),
"PORT": config("DB_PORT", 5432),
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"redis://{config('CACHE_DEFAULT', 'localhost:6379/0')}",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True,
},
},
"axes": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"redis://{config('CACHE_AXES', 'localhost:6379/0')}",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True,
},
},
}
#
# APPLICATIONS enabled for this project
#
INSTALLED_APPS = [
# Note: contenttypes should be first, see Django ticket #10827
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sessions",
# Note: If enabled, at least one Site object is required
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# Optional applications.
"ordered_model",
"django_admin_index",
"django.contrib.admin",
"django.contrib.gis",
# 'django.contrib.admindocs',
# 'django.contrib.humanize',
# External applications.
"axes",
"django_auth_adfs",
"django_auth_adfs_db",
"django_filters",
"django_db_logger",
"corsheaders",
"extra_views",
"vng_api_common", # before drf_yasg to override the management command
"vng_api_common.authorizations",
"vng_api_common.audittrails",
"vng_api_common.notifications",
"nlx_url_rewriter",
"drf_yasg",
"rest_framework",
"rest_framework_gis",
"django_markup",
"solo",
"sniplates",
"privates",
"django_better_admin_arrayfield.apps.DjangoBetterAdminArrayfieldConfig",
"django_loose_fk",
"zgw_consumers",
"drc_cmis",
# Project applications.
"openzaak",
"openzaak.accounts",
"openzaak.utils",
"openzaak.components.autorisaties",
"openzaak.components.zaken",
"openzaak.components.besluiten",
"openzaak.components.documenten",
"openzaak.components.catalogi",
"openzaak.config",
"openzaak.selectielijst",
"openzaak.notifications",
] + PLUGIN_INSTALLED_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"openzaak.utils.middleware.LogHeadersMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
# 'django.middleware.locale.LocaleMiddleware',
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"openzaak.components.autorisaties.middleware.AuthMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"openzaak.utils.middleware.APIVersionHeaderMiddleware",
"openzaak.utils.middleware.EnabledMiddleware",
]
ROOT_URLCONF = "openzaak.urls"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(DJANGO_PROJECT_DIR, "templates")],
"APP_DIRS": False, # conflicts with explicity specifying the loaders
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"openzaak.utils.context_processors.settings",
"django_admin_index.context_processors.dashboard",
],
"loaders": TEMPLATE_LOADERS,
},
}
]
WSGI_APPLICATION = "openzaak.wsgi.application"
# Translations
LOCALE_PATHS = (os.path.join(DJANGO_PROJECT_DIR, "conf", "locale"),)
#
# SERVING of static and media files
#
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Additional locations of static files
STATICFILES_DIRS = [os.path.join(DJANGO_PROJECT_DIR, "static")]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
#
# Sending EMAIL
#
EMAIL_HOST = config("EMAIL_HOST", default="localhost")
EMAIL_PORT = config(
"EMAIL_PORT", default=25
) # disabled on Google Cloud, use 587 instead
EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="")
EMAIL_USE_TLS = config("EMAIL_USE_TLS", default=False)
EMAIL_TIMEOUT = 10
DEFAULT_FROM_EMAIL = "openzaak@example.com"
#
# LOGGING
#
LOG_STDOUT = config("LOG_STDOUT", default=False)
LOGGING_DIR = os.path.join(BASE_DIR, "log")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(asctime)s %(levelname)s %(name)s %(module)s %(process)d %(thread)d %(message)s"
},
"timestamped": {"format": "%(asctime)s %(levelname)s %(name)s %(message)s"},
"simple": {"format": "%(levelname)s %(message)s"},
"performance": {"format": "%(asctime)s %(process)d | %(thread)d | %(message)s"},
},
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"failed_notification": {
"()": "openzaak.notifications.filters.FailedNotificationFilter"
},
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"null": {"level": "DEBUG", "class": "logging.NullHandler"},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "timestamped",
},
"django": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "django.log"),
"formatter": "verbose",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"project": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "openzaak.log"),
"formatter": "verbose",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"performance": {
"level": "INFO",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "performance.log"),
"formatter": "performance",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"requests": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "requests.log"),
"formatter": "timestamped",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"failed_notification": {
"level": "DEBUG",
"filters": ["failed_notification"],
"class": "openzaak.notifications.handlers.DatabaseLogHandler",
},
},
"loggers": {
"openzaak": {
"handlers": ["project"] if not LOG_STDOUT else ["console"],
"level": "INFO",
"propagate": True,
},
"openzaak.utils.middleware": {
"handlers": ["requests"] if not LOG_STDOUT else ["console"],
"level": "DEBUG",
"propagate": False,
},
"vng_api_common": {"handlers": ["console"], "level": "INFO", "propagate": True},
"django.request": {
"handlers": ["django"] if not LOG_STDOUT else ["console"],
"level": "ERROR",
"propagate": True,
},
"django.template": {
"handlers": ["console"],
"level": "INFO",
"propagate": True,
},
"vng_api_common.notifications.viewsets": {
"handlers": [
"failed_notification", # always log this to the database!
"project" if not LOG_STDOUT else "console",
],
"level": "WARNING",
"propagate": True,
},
},
}
#
# AUTH settings - user accounts, passwords, backends...
#
AUTH_USER_MODEL = "accounts.User"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Allow logging in with both username+password and email+password
AUTHENTICATION_BACKENDS = [
"openzaak.accounts.backends.UserModelEmailBackend",
"django.contrib.auth.backends.ModelBackend",
"django_auth_adfs_db.backends.AdfsAuthCodeBackend",
]
SESSION_COOKIE_NAME = "openzaak_sessionid"
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
LOGIN_URL = reverse_lazy("admin:login")
LOGIN_REDIRECT_URL = reverse_lazy("admin:index")
#
# SECURITY settings
#
SESSION_COOKIE_SECURE = IS_HTTPS
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = IS_HTTPS
X_FRAME_OPTIONS = "DENY"
#
# Silenced checks
#
SILENCED_SYSTEM_CHECKS = ["rest_framework.W001"]
#
# Custom settings
#
PROJECT_NAME = "Open Zaak"
SITE_TITLE = "API dashboard"
ENVIRONMENT = None
ENVIRONMENT_SHOWN_IN_ADMIN = True
# settings for uploading large files
MIN_UPLOAD_SIZE = config("MIN_UPLOAD_SIZE", 4 * 2 ** 30)
# urls for OAS3 specifications
SPEC_URL = {
"zaken": os.path.join(
BASE_DIR, "src", "openzaak", "components", "zaken", "openapi.yaml"
),
"besluiten": os.path.join(
BASE_DIR, "src", "openzaak", "components", "besluiten", "openapi.yaml"
),
"documenten": os.path.join(
BASE_DIR, "src", "openzaak", "components", "documenten", "openapi.yaml"
),
"catalogi": os.path.join(
BASE_DIR, "src", "openzaak", "components", "catalogi", "openapi.yaml"
),
"autorisaties": os.path.join(
BASE_DIR, "src", "openzaak", "components", "autorisaties", "openapi.yaml"
),
}
# Generating the schema, depending on the component
subpath = config("SUBPATH", None)
if subpath:
if not subpath.startswith("/"):
subpath = f"/{subpath}"
SUBPATH = subpath
if "GIT_SHA" in os.environ:
GIT_SHA = config("GIT_SHA", "")
# in docker (build) context, there is no .git directory
elif os.path.exists(os.path.join(BASE_DIR, ".git")):
repo = git.Repo(search_parent_directories=True)
GIT_SHA = repo.head.object.hexsha
else:
GIT_SHA = None
RELEASE = config("RELEASE", GIT_SHA)
##############################
# #
# 3RD PARTY LIBRARY SETTINGS #
# #
##############################
#
# AUTH-ADFS
#
AUTH_ADFS = {"SETTINGS_CLASS": "django_auth_adfs_db.settings.Settings"}
#
# DJANGO-AXES
#
AXES_CACHE = "axes" # refers to CACHES setting
AXES_LOGIN_FAILURE_LIMIT = 5 # Default: 3
AXES_LOCK_OUT_AT_FAILURE = True # Default: True
AXES_USE_USER_AGENT = False # Default: False
AXES_COOLOFF_TIME = datetime.timedelta(minutes=5) # Five minutes
AXES_BEHIND_REVERSE_PROXY = IS_HTTPS # We have either Ingress or Nginx
AXES_ONLY_USER_FAILURES = (
False # Default: False (you might want to block on username rather than IP)
)
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = (
False # Default: False (you might want to block on username and IP)
)
#
# DJANGO-HIJACK
#
HIJACK_LOGIN_REDIRECT_URL = reverse_lazy("home")
HIJACK_LOGOUT_REDIRECT_URL = reverse_lazy("admin:accounts_user_changelist")
HIJACK_REGISTER_ADMIN = False
# This is a CSRF-security risk.
# See: http://django-hijack.readthedocs.io/en/latest/configuration/#allowing-get-method-for-hijack-views
HIJACK_ALLOW_GET_REQUESTS = True
#
# DJANGO-CORS-MIDDLEWARE
#
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = (
"x-requested-with",
"content-type",
"accept",
"origin",
"authorization",
"x-csrftoken",
"user-agent",
"accept-encoding",
"accept-crs",
"content-crs",
)
#
# DJANGO-PRIVATES -- safely serve files after authorization
#
PRIVATE_MEDIA_ROOT = os.path.join(BASE_DIR, "private-media")
PRIVATE_MEDIA_URL = "/private-media/"
# requires an nginx container running in front
SENDFILE_BACKEND = config("SENDFILE_BACKEND", "django_sendfile.backends.nginx")
SENDFILE_ROOT = PRIVATE_MEDIA_ROOT
SENDFILE_URL = PRIVATE_MEDIA_URL
#
# DJANGO-LOOSE-FK -- handle internal and external API resources
#
DEFAULT_LOOSE_FK_LOADER = "openzaak.loaders.AuthorizedRequestsLoader"
#
# RAVEN/SENTRY - error monitoring
#
SENTRY_DSN = config("SENTRY_DSN", None)
SENTRY_SDK_INTEGRATIONS = [
django.DjangoIntegration(),
redis.RedisIntegration(),
]
if SENTRY_DSN:
SENTRY_CONFIG = {
"dsn": SENTRY_DSN,
"release": RELEASE or "RELEASE not set",
}
sentry_sdk.init(
**SENTRY_CONFIG,
integrations=SENTRY_SDK_INTEGRATIONS,
send_default_pii=True,
before_send=filter_sensitive_data,
)
#
# DJANGO-ADMIN-INDEX
#
ADMIN_INDEX_SHOW_REMAINING_APPS_TO_SUPERUSERS = False
ADMIN_INDEX_AUTO_CREATE_APP_GROUP = False
#
# OpenZaak configuration
#
OPENZAAK_API_CONTACT_EMAIL = "support@maykinmedia.nl"
OPENZAAK_API_CONTACT_URL = "https://www.maykinmedia.nl"
STORE_FAILED_NOTIFS = True
# Expiry time in seconds for JWT
JWT_EXPIRY = config("JWT_EXPIRY", default=3600)
NLX_DIRECTORY_URLS = {
NLXDirectories.demo: "https://directory.demo.nlx.io/",
NLXDirectories.preprod: "https://directory.preprod.nlx.io/",
NLXDirectories.prod: "https://directory.prod.nlx.io/",
}
CUSTOM_CLIENT_FETCHER = "openzaak.utils.auth.get_client"
CMIS_ENABLED = config("CMIS_ENABLED", default=False)
CMIS_MAPPER_FILE = config(
"CMIS_MAPPER_FILE", default=os.path.join(BASE_DIR, "config", "cmis_mapper.json")
)
| ./CrossVul/dataset_final_sorted/CWE-346/py/bad_4367_8 |
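A hedged probe of the CORS_ORIGIN_ALLOW_ALL = True policy above (hypothetical instance and path at http://localhost:8000): django-cors-headers will answer any Origin, so scripts on any site can read API responses.

import requests  # third-party HTTP client; instance URL is hypothetical

resp = requests.get(
    "http://localhost:8000/zaken/api/v1/zaken",
    headers={"Origin": "https://evil.example"},
)
# With allow-all enabled, the response carries a permissive
# Access-Control-Allow-Origin header for this arbitrary origin.
print(resp.headers.get("Access-Control-Allow-Origin"))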
crossvul-python_data_bad_3126_0 | """
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
import slixmpp
from slixmpp.stanza import Message, Iq
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0280 import stanza
log = logging.getLogger(__name__)
class XEP_0280(BasePlugin):
"""
XEP-0280 Message Carbons
"""
name = 'xep_0280'
description = 'XEP-0280: Message Carbons'
dependencies = {'xep_0030', 'xep_0297'}
stanza = stanza
def plugin_init(self):
self.xmpp.register_handler(
Callback('Carbon Received',
StanzaPath('message/carbon_received'),
self._handle_carbon_received))
self.xmpp.register_handler(
Callback('Carbon Sent',
StanzaPath('message/carbon_sent'),
self._handle_carbon_sent))
register_stanza_plugin(Message, stanza.ReceivedCarbon)
register_stanza_plugin(Message, stanza.SentCarbon)
register_stanza_plugin(Message, stanza.PrivateCarbon)
register_stanza_plugin(Iq, stanza.CarbonEnable)
register_stanza_plugin(Iq, stanza.CarbonDisable)
register_stanza_plugin(stanza.ReceivedCarbon,
self.xmpp['xep_0297'].stanza.Forwarded)
register_stanza_plugin(stanza.SentCarbon,
self.xmpp['xep_0297'].stanza.Forwarded)
def plugin_end(self):
self.xmpp.remove_handler('Carbon Received')
self.xmpp.remove_handler('Carbon Sent')
self.xmpp.plugin['xep_0030'].del_feature(feature='urn:xmpp:carbons:2')
def session_bind(self, jid):
self.xmpp.plugin['xep_0030'].add_feature('urn:xmpp:carbons:2')
def _handle_carbon_received(self, msg):
self.xmpp.event('carbon_received', msg)
def _handle_carbon_sent(self, msg):
self.xmpp.event('carbon_sent', msg)
def enable(self, ifrom=None, timeout=None, callback=None,
timeout_callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
iq.enable('carbon_enable')
return iq.send(timeout_callback=timeout_callback, timeout=timeout,
callback=callback)
def disable(self, ifrom=None, timeout=None, callback=None,
timeout_callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
iq.enable('carbon_disable')
return iq.send(timeout_callback=timeout_callback, timeout=timeout,
callback=callback)
| ./CrossVul/dataset_final_sorted/CWE-346/py/bad_3126_0 |
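The entire behavioral difference between this vulnerable variant and good_3126_0 above is an origin check in the two carbon handlers. A sketch of the guard, mirroring the patched file:

def _handle_carbon_received(self, msg):
    # Only trust carbon wrappers sent by our own bare JID; without this,
    # any contact can forge a <received/> carbon and spoof conversation
    # history (the CWE-346 issue this pair of files illustrates).
    if msg['from'].bare == self.xmpp.boundjid.bare:
        self.xmpp.event('carbon_received', msg)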
crossvul-python_data_good_4367_8 | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
import datetime
import os
from django.urls import reverse_lazy
import git
import sentry_sdk
from corsheaders.defaults import default_headers as default_cors_headers
from sentry_sdk.integrations import django, redis
# NLX directory urls
from openzaak.config.constants import NLXDirectories
from ...utils.monitoring import filter_sensitive_data
from .api import * # noqa
from .environ import config
from .plugins import PLUGIN_INSTALLED_APPS
# Build paths inside the project, so further paths can be defined relative to
# the code root.
DJANGO_PROJECT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)
BASE_DIR = os.path.abspath(
os.path.join(DJANGO_PROJECT_DIR, os.path.pardir, os.path.pardir)
)
#
# Core Django settings
#
SITE_ID = config("SITE_ID", default=1)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# NEVER run with DEBUG=True in production-like environments
DEBUG = config("DEBUG", default=False)
# = domains we're running on
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default="", split=True)
IS_HTTPS = config("IS_HTTPS", default=not DEBUG)
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "nl-nl"
TIME_ZONE = "UTC" # note: this *may* affect the output of DRF datetimes
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
#
# DATABASE and CACHING setup
#
DATABASES = {
"default": {
"ENGINE": "django.contrib.gis.db.backends.postgis",
"NAME": config("DB_NAME", "openzaak"),
"USER": config("DB_USER", "openzaak"),
"PASSWORD": config("DB_PASSWORD", "openzaak"),
"HOST": config("DB_HOST", "localhost"),
"PORT": config("DB_PORT", 5432),
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"redis://{config('CACHE_DEFAULT', 'localhost:6379/0')}",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True,
},
},
"axes": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"redis://{config('CACHE_AXES', 'localhost:6379/0')}",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True,
},
},
}
#
# APPLICATIONS enabled for this project
#
INSTALLED_APPS = [
# Note: contenttypes should be first, see Django ticket #10827
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sessions",
# Note: If enabled, at least one Site object is required
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# Optional applications.
"ordered_model",
"django_admin_index",
"django.contrib.admin",
"django.contrib.gis",
# 'django.contrib.admindocs',
# 'django.contrib.humanize',
# External applications.
"axes",
"django_auth_adfs",
"django_auth_adfs_db",
"django_filters",
"django_db_logger",
"corsheaders",
"extra_views",
"vng_api_common", # before drf_yasg to override the management command
"vng_api_common.authorizations",
"vng_api_common.audittrails",
"vng_api_common.notifications",
"nlx_url_rewriter",
"drf_yasg",
"rest_framework",
"rest_framework_gis",
"django_markup",
"solo",
"sniplates",
"privates",
"django_better_admin_arrayfield.apps.DjangoBetterAdminArrayfieldConfig",
"django_loose_fk",
"zgw_consumers",
"drc_cmis",
# Project applications.
"openzaak",
"openzaak.accounts",
"openzaak.utils",
"openzaak.components.autorisaties",
"openzaak.components.zaken",
"openzaak.components.besluiten",
"openzaak.components.documenten",
"openzaak.components.catalogi",
"openzaak.config",
"openzaak.selectielijst",
"openzaak.notifications",
] + PLUGIN_INSTALLED_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"openzaak.utils.middleware.LogHeadersMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
# 'django.middleware.locale.LocaleMiddleware',
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"openzaak.components.autorisaties.middleware.AuthMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"openzaak.utils.middleware.APIVersionHeaderMiddleware",
"openzaak.utils.middleware.EnabledMiddleware",
]
ROOT_URLCONF = "openzaak.urls"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(DJANGO_PROJECT_DIR, "templates")],
"APP_DIRS": False, # conflicts with explicity specifying the loaders
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"openzaak.utils.context_processors.settings",
"django_admin_index.context_processors.dashboard",
],
"loaders": TEMPLATE_LOADERS,
},
}
]
WSGI_APPLICATION = "openzaak.wsgi.application"
# Translations
LOCALE_PATHS = (os.path.join(DJANGO_PROJECT_DIR, "conf", "locale"),)
#
# SERVING of static and media files
#
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Additional locations of static files
STATICFILES_DIRS = [os.path.join(DJANGO_PROJECT_DIR, "static")]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
#
# Sending EMAIL
#
EMAIL_HOST = config("EMAIL_HOST", default="localhost")
EMAIL_PORT = config(
"EMAIL_PORT", default=25
) # disabled on Google Cloud, use 587 instead
EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="")
EMAIL_USE_TLS = config("EMAIL_USE_TLS", default=False)
EMAIL_TIMEOUT = 10
DEFAULT_FROM_EMAIL = "openzaak@example.com"
#
# LOGGING
#
LOG_STDOUT = config("LOG_STDOUT", default=False)
LOGGING_DIR = os.path.join(BASE_DIR, "log")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(asctime)s %(levelname)s %(name)s %(module)s %(process)d %(thread)d %(message)s"
},
"timestamped": {"format": "%(asctime)s %(levelname)s %(name)s %(message)s"},
"simple": {"format": "%(levelname)s %(message)s"},
"performance": {"format": "%(asctime)s %(process)d | %(thread)d | %(message)s"},
},
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"failed_notification": {
"()": "openzaak.notifications.filters.FailedNotificationFilter"
},
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"null": {"level": "DEBUG", "class": "logging.NullHandler"},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "timestamped",
},
"django": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "django.log"),
"formatter": "verbose",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"project": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "openzaak.log"),
"formatter": "verbose",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"performance": {
"level": "INFO",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "performance.log"),
"formatter": "performance",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"requests": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGGING_DIR, "requests.log"),
"formatter": "timestamped",
"maxBytes": 1024 * 1024 * 10, # 10 MB
"backupCount": 10,
},
"failed_notification": {
"level": "DEBUG",
"filters": ["failed_notification"],
"class": "openzaak.notifications.handlers.DatabaseLogHandler",
},
},
"loggers": {
"openzaak": {
"handlers": ["project"] if not LOG_STDOUT else ["console"],
"level": "INFO",
"propagate": True,
},
"openzaak.utils.middleware": {
"handlers": ["requests"] if not LOG_STDOUT else ["console"],
"level": "DEBUG",
"propagate": False,
},
"vng_api_common": {"handlers": ["console"], "level": "INFO", "propagate": True},
"django.request": {
"handlers": ["django"] if not LOG_STDOUT else ["console"],
"level": "ERROR",
"propagate": True,
},
"django.template": {
"handlers": ["console"],
"level": "INFO",
"propagate": True,
},
"vng_api_common.notifications.viewsets": {
"handlers": [
"failed_notification", # always log this to the database!
"project" if not LOG_STDOUT else "console",
],
"level": "WARNING",
"propagate": True,
},
},
}
#
# AUTH settings - user accounts, passwords, backends...
#
AUTH_USER_MODEL = "accounts.User"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Allow logging in with both username+password and email+password
AUTHENTICATION_BACKENDS = [
"openzaak.accounts.backends.UserModelEmailBackend",
"django.contrib.auth.backends.ModelBackend",
"django_auth_adfs_db.backends.AdfsAuthCodeBackend",
]
SESSION_COOKIE_NAME = "openzaak_sessionid"
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
LOGIN_URL = reverse_lazy("admin:login")
LOGIN_REDIRECT_URL = reverse_lazy("admin:index")
#
# SECURITY settings
#
SESSION_COOKIE_SECURE = IS_HTTPS
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = IS_HTTPS
X_FRAME_OPTIONS = "DENY"
#
# Silenced checks
#
SILENCED_SYSTEM_CHECKS = ["rest_framework.W001"]
#
# Custom settings
#
PROJECT_NAME = "Open Zaak"
SITE_TITLE = "API dashboard"
ENVIRONMENT = None
ENVIRONMENT_SHOWN_IN_ADMIN = True
# settings for uploading large files
MIN_UPLOAD_SIZE = config("MIN_UPLOAD_SIZE", 4 * 2 ** 30)
# urls for OAS3 specifications
SPEC_URL = {
"zaken": os.path.join(
BASE_DIR, "src", "openzaak", "components", "zaken", "openapi.yaml"
),
"besluiten": os.path.join(
BASE_DIR, "src", "openzaak", "components", "besluiten", "openapi.yaml"
),
"documenten": os.path.join(
BASE_DIR, "src", "openzaak", "components", "documenten", "openapi.yaml"
),
"catalogi": os.path.join(
BASE_DIR, "src", "openzaak", "components", "catalogi", "openapi.yaml"
),
"autorisaties": os.path.join(
BASE_DIR, "src", "openzaak", "components", "autorisaties", "openapi.yaml"
),
}
# Generating the schema, depending on the component
subpath = config("SUBPATH", None)
if subpath:
if not subpath.startswith("/"):
subpath = f"/{subpath}"
SUBPATH = subpath
if "GIT_SHA" in os.environ:
GIT_SHA = config("GIT_SHA", "")
# in docker (build) context, there is no .git directory
elif os.path.exists(os.path.join(BASE_DIR, ".git")):
repo = git.Repo(search_parent_directories=True)
GIT_SHA = repo.head.object.hexsha
else:
GIT_SHA = None
RELEASE = config("RELEASE", GIT_SHA)
##############################
# #
# 3RD PARTY LIBRARY SETTINGS #
# #
##############################
#
# AUTH-ADFS
#
AUTH_ADFS = {"SETTINGS_CLASS": "django_auth_adfs_db.settings.Settings"}
#
# DJANGO-AXES
#
AXES_CACHE = "axes" # refers to CACHES setting
AXES_LOGIN_FAILURE_LIMIT = 5 # Default: 3
AXES_LOCK_OUT_AT_FAILURE = True # Default: True
AXES_USE_USER_AGENT = False # Default: False
AXES_COOLOFF_TIME = datetime.timedelta(minutes=5) # Five minutes
AXES_BEHIND_REVERSE_PROXY = IS_HTTPS # We have either Ingress or Nginx
AXES_ONLY_USER_FAILURES = (
False # Default: False (you might want to block on username rather than IP)
)
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = (
False # Default: False (you might want to block on username and IP)
)
#
# DJANGO-HIJACK
#
HIJACK_LOGIN_REDIRECT_URL = reverse_lazy("home")
HIJACK_LOGOUT_REDIRECT_URL = reverse_lazy("admin:accounts_user_changelist")
HIJACK_REGISTER_ADMIN = False
# This is a CSRF-security risk.
# See: http://django-hijack.readthedocs.io/en/latest/configuration/#allowing-get-method-for-hijack-views
HIJACK_ALLOW_GET_REQUESTS = True
#
# DJANGO-CORS-MIDDLEWARE
#
CORS_ALLOW_ALL_ORIGINS = config("CORS_ALLOW_ALL_ORIGINS", default=False)
CORS_ALLOWED_ORIGINS = config("CORS_ALLOWED_ORIGINS", split=True, default=[])
CORS_ALLOWED_ORIGIN_REGEXES = config(
"CORS_ALLOWED_ORIGIN_REGEXES", split=True, default=[]
)
# Authorization is included in default_cors_headers
CORS_ALLOW_HEADERS = (
list(default_cors_headers)
+ ["accept-crs", "content-crs",]
+ config("CORS_EXTRA_ALLOW_HEADERS", split=True, default=[])
)
CORS_EXPOSE_HEADERS = [
"content-crs",
]
# Django's SESSION_COOKIE_SAMESITE = "Lax" prevents session cookies from being sent
# cross-domain. There is no need for these cookies to be sent, since the API itself
# uses Bearer Authentication.
#
# DJANGO-PRIVATES -- safely serve files after authorization
#
PRIVATE_MEDIA_ROOT = os.path.join(BASE_DIR, "private-media")
PRIVATE_MEDIA_URL = "/private-media/"
# requires an nginx container running in front
SENDFILE_BACKEND = config("SENDFILE_BACKEND", "django_sendfile.backends.nginx")
SENDFILE_ROOT = PRIVATE_MEDIA_ROOT
SENDFILE_URL = PRIVATE_MEDIA_URL
#
# DJANGO-LOOSE-FK -- handle internal and external API resources
#
DEFAULT_LOOSE_FK_LOADER = "openzaak.loaders.AuthorizedRequestsLoader"
#
# RAVEN/SENTRY - error monitoring
#
SENTRY_DSN = config("SENTRY_DSN", None)
SENTRY_SDK_INTEGRATIONS = [
django.DjangoIntegration(),
redis.RedisIntegration(),
]
if SENTRY_DSN:
SENTRY_CONFIG = {
"dsn": SENTRY_DSN,
"release": RELEASE or "RELEASE not set",
}
sentry_sdk.init(
**SENTRY_CONFIG,
integrations=SENTRY_SDK_INTEGRATIONS,
send_default_pii=True,
before_send=filter_sensitive_data,
)
#
# DJANGO-ADMIN-INDEX
#
ADMIN_INDEX_SHOW_REMAINING_APPS_TO_SUPERUSERS = False
ADMIN_INDEX_AUTO_CREATE_APP_GROUP = False
#
# OpenZaak configuration
#
OPENZAAK_API_CONTACT_EMAIL = "support@maykinmedia.nl"
OPENZAAK_API_CONTACT_URL = "https://www.maykinmedia.nl"
STORE_FAILED_NOTIFS = True
# Expiry time in seconds for JWT
JWT_EXPIRY = config("JWT_EXPIRY", default=3600)
NLX_DIRECTORY_URLS = {
NLXDirectories.demo: "https://directory.demo.nlx.io/",
NLXDirectories.preprod: "https://directory.preprod.nlx.io/",
NLXDirectories.prod: "https://directory.prod.nlx.io/",
}
CUSTOM_CLIENT_FETCHER = "openzaak.utils.auth.get_client"
CMIS_ENABLED = config("CMIS_ENABLED", default=False)
CMIS_MAPPER_FILE = config(
"CMIS_MAPPER_FILE", default=os.path.join(BASE_DIR, "config", "cmis_mapper.json")
)
| ./CrossVul/dataset_final_sorted/CWE-346/py/good_4367_8 |
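A sketch of driving the hardened CORS settings above from the environment (hypothetical origins); each list-valued variable is parsed as comma-separated by the config(..., split=True) wrapper:

import os

# Hypothetical deployment values; with these unset, the settings above
# default to denying cross-origin access (False / empty lists).
os.environ["CORS_ALLOW_ALL_ORIGINS"] = "False"
os.environ["CORS_ALLOWED_ORIGINS"] = (
    "https://app.example.com,https://admin.example.com"
)
os.environ["CORS_EXTRA_ALLOW_HEADERS"] = "x-request-id"
# config("CORS_ALLOWED_ORIGINS", split=True, default=[])
# -> ["https://app.example.com", "https://admin.example.com"]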
crossvul-python_data_good_4367_7 | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
import factory
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: f"user-{n}")
password = factory.PostGenerationMethodCall("set_password")
class Meta:
model = "accounts.User"
class SuperUserFactory(UserFactory):
is_staff = True
is_superuser = True
| ./CrossVul/dataset_final_sorted/CWE-346/py/good_4367_7 |
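A usage sketch for the fixed factory (assumes a configured Django test environment): the PostGenerationMethodCall declaration routes the password keyword through set_password(), so the field holds a hash rather than the raw value.

user = UserFactory(password="letmein")
assert user.check_password("letmein")   # hashed via set_password()
assert user.password != "letmein"       # never stored in plain text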
crossvul-python_data_bad_5731_0 | """ Track relationships between compiled extension functions & code fragments
catalog keeps track of which compiled(or even standard) functions are
related to which code fragments. It also stores these relationships
to disk so they are remembered between Python sessions. When
a = 1
compiler.inline('printf("printed from C: %d",a);',['a'] )
is called, inline() first looks to see if it has seen the code
'printf("printed from C");' before. If not, it calls
catalog.get_functions('printf("printed from C: %d", a);')
which returns a list of all the function objects that have been compiled
for the code fragment. Multiple functions can occur because the code
could be compiled for different types for 'a' (although not likely in
this case). The catalog first looks in its cache and quickly returns
a list of the functions if possible. If the cache lookup fails, it then
looks through possibly multiple catalog files on disk and fills its
cache with all the functions that match the code fragment.
In case where the code fragment hasn't been compiled, inline() compiles
the code and then adds it to the catalog:
function = <code to compile function>
catalog.add_function('printf("printed from C: %d", a);',function)
add_function() adds function to the front of the cache. function,
along with the path information to its module, are also stored in a
persistent catalog for future use by python sessions.
"""
from __future__ import absolute_import, print_function
import os
import sys
import pickle
import socket
import tempfile
try:
import dbhash
import shelve
dumb = 0
except ImportError:
from . import _dumb_shelve as shelve
dumb = 1
#For testing...
#import scipy.io.dumb_shelve as shelve
#dumb = 1
#import shelve
#dumb = 0
def getmodule(object):
""" Discover the name of the module where object was defined.
This is an augmented version of inspect.getmodule that can discover
the parent module for extension functions.
"""
import inspect
value = inspect.getmodule(object)
if value is None:
# walk through all modules looking for the function
for name,mod in sys.modules.items():
# try except used because of some comparison failures
# in wxPoint code. Need to review this
try:
if mod and object in mod.__dict__.values():
value = mod
# if it is a built-in module, keep looking to see
# if a non-builtin also has it. Otherwise quit and
# consider the module found. (ain't perfect, but will
# have to do for now).
if str(mod) not in '(built-in)':
break
except (TypeError, KeyError, ImportError):
pass
return value
def expr_to_filename(expr):
""" Convert an arbitrary expr string to a valid file name.
The name is based on the md5 check sum of the string.
Something a little more human readable would be
nice, but the computer doesn't seem to care.
"""
import scipy.weave.md5_load as md5
base = 'sc_'
return base + md5.new(expr).hexdigest()
def unique_file(d,expr):
""" Generate a unqiue file name based on expr in directory d
This is meant for use with building extension modules, so
a file name is considered unique if none of the following
extension '.cpp','.o','.so','module.so','.py', or '.pyd'
exists in directory d. The fully qualified path to the
new name is returned. You'll need to append your own
extension to it before creating files.
"""
files = os.listdir(d)
#base = 'scipy_compile'
base = expr_to_filename(expr)
for i in xrange(1000000):
fname = base + repr(i)
if not (fname+'.cpp' in files or
fname+'.o' in files or
fname+'.so' in files or
fname+'module.so' in files or
fname+'.py' in files or
fname+'.pyd' in files):
break
return os.path.join(d,fname)
def is_writable(dir):
"""Determine whether a given directory is writable in a portable manner.
Parameters
----------
dir : str
A string representing a path to a directory on the filesystem.
Returns
-------
res : bool
True or False.
"""
if not os.path.isdir(dir):
return False
# Do NOT use a hardcoded name here due to the danger from race conditions
# on NFS when multiple processes are accessing the same base directory in
# parallel. We use both hostname and process id for the prefix in an
# attempt to ensure that there can really be no name collisions (tempfile
# appends 6 random chars to this prefix).
prefix = 'dummy_%s_%s_' % (socket.gethostname(),os.getpid())
try:
tmp = tempfile.TemporaryFile(prefix=prefix,dir=dir)
except OSError:
return False
# The underlying file is destroyed upon closing the file object (under
# *nix, it was unlinked at creation time)
tmp.close()
return True
def whoami():
"""return a string identifying the user."""
return os.environ.get("USER") or os.environ.get("USERNAME") or "unknown"
def default_dir():
""" Return a default location to store compiled files and catalogs.
XX is the Python version number in all paths listed below
On windows, the default location is the temporary directory
returned by gettempdir()/pythonXX.
On Unix, ~/.pythonXX_compiled is the default location. If it doesn't
exist, it is created. The directory is marked rwx------.
If for some reason it isn't possible to build a default directory
in the user's home, /tmp/<uid>_pythonXX_compiled is used. If it
doesn't exist, it is created. The directory is marked rwx------
to try and keep people from being able to sneak a bad module
in on you.
"""
# Use a cached value for fast return if possible
if hasattr(default_dir,"cached_path") and \
os.path.exists(default_dir.cached_path) and \
os.access(default_dir.cached_path, os.W_OK):
return default_dir.cached_path
python_name = "python%d%d_compiled" % tuple(sys.version_info[:2])
path_candidates = []
if sys.platform != 'win32':
try:
path_candidates.append(os.path.join(os.environ['HOME'],
'.' + python_name))
except KeyError:
pass
temp_dir = repr(os.getuid()) + '_' + python_name
path_candidates.append(os.path.join(tempfile.gettempdir(), temp_dir))
else:
path_candidates.append(os.path.join(tempfile.gettempdir(),
"%s" % whoami(), python_name))
writable = False
for path in path_candidates:
if not os.path.exists(path):
try:
os.makedirs(path, mode=0o700)
except OSError:
continue
if is_writable(path):
writable = True
break
if not writable:
print('warning: default directory is not write accessible.')
print('default:', path)
# Cache the default dir path so that this function returns quickly after
# being called once (nothing in it should change after the first call)
default_dir.cached_path = path
return path
def intermediate_dir():
""" Location in temp dir for storing .cpp and .o files during
builds.
"""
python_name = "python%d%d_intermediate" % tuple(sys.version_info[:2])
path = os.path.join(tempfile.gettempdir(),"%s"%whoami(),python_name)
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
return path
def default_temp_dir():
path = os.path.join(default_dir(),'temp')
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
if not is_writable(path):
print('warning: default directory is not write accessible.')
print('default:', path)
return path
def os_dependent_catalog_name():
""" Generate catalog name dependent on OS and Python version being used.
This allows multiple platforms to have catalog files in the
same directory without stepping on each other. For now, it
bases the name on the value returned by sys.platform and the
version of python being run. If this isn't enough to discriminate
on some platforms, we can try to add other info. It has
occurred to me that if we get fancy enough to optimize for different
architectures, then chip type might be added to the catalog name also.
"""
version = '%d%d' % sys.version_info[:2]
return sys.platform+version+'compiled_catalog'
def catalog_path(module_path):
""" Return the full path name for the catalog file in the given directory.
module_path can either be a file name or a path name. If it is a
file name, the catalog file name in its parent directory is returned.
If it is a directory, the catalog file in that directory is returned.
If module_path doesn't exist, None is returned. Note though, that the
catalog file does *not* have to exist, only its parent. '~', shell
variables, and relative ('.' and '..') paths are all acceptable.
catalog file names are os dependent (based on sys.platform), so this
should support multiple platforms sharing the same disk space
(NFS mounts). See os_dependent_catalog_name() for more info.
"""
module_path = os.path.expanduser(module_path)
module_path = os.path.expandvars(module_path)
module_path = os.path.abspath(module_path)
if not os.path.exists(module_path):
catalog_file = None
elif not os.path.isdir(module_path):
module_path,dummy = os.path.split(module_path)
catalog_file = os.path.join(module_path,os_dependent_catalog_name())
else:
catalog_file = os.path.join(module_path,os_dependent_catalog_name())
return catalog_file
def get_catalog(module_path,mode='r'):
""" Return a function catalog (shelve object) from the path module_path
If module_path is a directory, the function catalog returned is
from that directory. If module_path is an actual module_name,
then the function catalog returned is from its parent directory.
mode uses the standard 'c' = create, 'n' = new, 'r' = read,
'w' = write file open modes available for anydbm databases.
Well... it should be. Stuck with dumbdbm for now and the modes
almost don't matter. We do some checking for 'r' mode, but that
is about it.
See catalog_path() for more information on module_path.
"""
if mode not in ['c','r','w','n']:
msg = " mode must be 'c', 'n', 'r', or 'w'. See anydbm for more info"
raise ValueError(msg)
catalog_file = catalog_path(module_path)
if (catalog_file is not None) \
and ((dumb and os.path.exists(catalog_file+'.dat')) \
or os.path.exists(catalog_file)):
sh = shelve.open(catalog_file,mode)
else:
if mode=='r':
sh = None
else:
sh = shelve.open(catalog_file,mode)
return sh
class catalog(object):
""" Stores information about compiled functions both in cache and on disk.
catalog stores (code, list_of_function) pairs so that all the functions
that have been compiled for code are available for calling (usually in
inline or blitz).
catalog keeps a dictionary of previously accessed code values cached
for quick access. It also handles the looking up of functions compiled
in previously called Python sessions on disk in function catalogs.
catalog searches the directories in the PYTHONCOMPILED environment
variable in order loading functions that correspond to the given code
fragment. A default directory is also searched for catalog functions.
On unix, the default directory is usually '~/.pythonxx_compiled' where
xx is the version of Python used. On windows, it is the directory
returned by tempfile.gettempdir(). Functions found in directories closer
to the front of the search path are guaranteed to be closer to the front
of the function list, so that they will be called first. See
get_cataloged_functions() for more info on how the search order is
traversed.
Catalog also handles storing information about compiled functions to
a catalog. When writing this information, the first writable catalog
file in PYTHONCOMPILED path is used. If a writable catalog is not
found, it is written to the catalog in the default directory. This
directory should always be writable.
"""
def __init__(self,user_path_list=None):
""" Create a catalog for storing/searching for compiled functions.
user_path_list contains directories that should be searched
first for function catalogs. They will come before the path
entries in the PYTHONCOMPILED environment variable.
"""
if isinstance(user_path_list, str):
self.user_path_list = [user_path_list]
elif user_path_list:
self.user_path_list = user_path_list
else:
self.user_path_list = []
self.cache = {}
self.module_dir = None
self.paths_added = 0
# unconditionally append the default dir for auto-generated compiled
# extension modules, so that pickle.load()s don't fail.
sys.path.append(default_dir())
def set_module_directory(self,module_dir):
""" Set the path that will replace 'MODULE' in catalog searches.
You should call clear_module_directory() when you're finished
working with it.
"""
self.module_dir = module_dir
def get_module_directory(self):
""" Return the path used to replace the 'MODULE' in searches.
"""
return self.module_dir
def clear_module_directory(self):
""" Reset 'MODULE' path to None so that it is ignored in searches.
"""
self.module_dir = None
def get_environ_path(self):
""" Return list of paths from 'PYTHONCOMPILED' environment variable.
On Unix the path in PYTHONCOMPILED is a ':' separated list of
directories. On Windows, a ';' separated list is used.
"""
paths = []
if 'PYTHONCOMPILED' in os.environ:
path_string = os.environ['PYTHONCOMPILED']
paths = path_string.split(os.path.pathsep)
return paths
def build_search_order(self):
""" Returns a list of paths that are searched for catalogs.
Values specified in the catalog constructor are searched first,
then values found in the PYTHONCOMPILED environment variable.
The directory returned by default_dir() is always returned at
the end of the list.
There is a 'magic' path name called 'MODULE' that is replaced
by the directory defined by set_module_directory(). If the
module directory hasn't been set, 'MODULE' is ignored.
"""
paths = self.user_path_list + self.get_environ_path()
search_order = []
for path in paths:
if path == 'MODULE':
if self.module_dir:
search_order.append(self.module_dir)
else:
search_order.append(path)
search_order.append(default_dir())
return search_order
def get_catalog_files(self):
""" Returns catalog file list in correct search order.
Some of the catalog files may not currently exist.
However, all will be valid locations for a catalog
to be created (if you have write permission).
"""
files = map(catalog_path,self.build_search_order())
files = filter(lambda x: x is not None,files)
return files
def get_existing_files(self):
""" Returns all existing catalog file list in correct search order.
"""
files = self.get_catalog_files()
# open every stinking file to check if it exists.
# This is because anydbm doesn't provide a consistent naming
# convention across platforms for its files
existing_files = []
for file in files:
cat = get_catalog(os.path.dirname(file),'r')
if cat is not None:
existing_files.append(file)
cat.close()
# This is the non-portable (and much faster) old code
#existing_files = filter(os.path.exists,files)
return existing_files
def get_writable_file(self,existing_only=0):
""" Return the name of the first writable catalog file.
Its parent directory must also be writable. This is so that
compiled modules can be written to the same directory.
"""
# note: both file and its parent directory must be writeable
if existing_only:
files = self.get_existing_files()
else:
files = self.get_catalog_files()
# filter for (file exists and is writable) OR directory is writable
def file_test(x):
from os import access, F_OK, W_OK
return (access(x,F_OK) and access(x,W_OK) or
access(os.path.dirname(x),W_OK))
writable = filter(file_test,files)
if writable:
file = writable[0]
else:
file = None
return file
def get_writable_dir(self):
""" Return the parent directory of first writable catalog file.
The returned directory has write access.
"""
return os.path.dirname(self.get_writable_file())
def unique_module_name(self,code,module_dir=None):
""" Return full path to unique file name that in writable location.
The directory for the file is the first writable directory in
the catalog search path. The unique file name is derived from
the code fragment. If, module_dir is specified, it is used
to replace 'MODULE' in the search path.
"""
if module_dir is not None:
self.set_module_directory(module_dir)
try:
d = self.get_writable_dir()
finally:
if module_dir is not None:
self.clear_module_directory()
return unique_file(d, code)
def path_key(self,code):
""" Return key for path information for functions associated with code.
"""
return '__path__' + code
def configure_path(self,cat,code):
""" Add the python path for the given code to the sys.path
unconfigure_path() should be called as soon as possible after
imports associated with code are finished so that sys.path
is restored to normal.
"""
try:
paths = cat[self.path_key(code)]
self.paths_added = len(paths)
sys.path = paths + sys.path
except:
self.paths_added = 0
def unconfigure_path(self):
""" Restores sys.path to normal after calls to configure_path()
Remove the previously added paths from sys.path
"""
sys.path = sys.path[self.paths_added:]
self.paths_added = 0
def get_cataloged_functions(self,code):
""" Load all functions associated with code from catalog search path.
Sometimes there can be trouble loading a function listed in a
catalog file because the actual module that holds the function
has been moved or deleted. When this happens, that catalog file
is "repaired", meaning the entire entry for this function is
removed from the file. This only affects the catalog file that
has problems -- not the others in the search path.
The "repair" behavior may not be needed, but I'll keep it for now.
"""
mode = 'r'
cat = None
function_list = []
for path in self.build_search_order():
cat = get_catalog(path,mode)
if cat is not None and code in cat:
# set up the python path so that modules for this
# function can be loaded.
self.configure_path(cat,code)
try:
function_list += cat[code]
except: #SystemError and ImportError so far seen
# problems loading a function from the catalog. Try to
# repair the cause.
cat.close()
self.repair_catalog(path,code)
self.unconfigure_path()
if cat is not None:
# ensure that the catalog is properly closed
cat.close()
return function_list
def repair_catalog(self,catalog_path,code):
""" Remove entry for code from catalog_path
Occasionally catalog entries could get corrupted. An example
would be when a module that had functions in the catalog was
deleted or moved on the disk. The best current repair method is
just to trash the entire catalog entry for this piece of code.
This may lose function entries that are valid, but that's life.
catalog_path must be writable for repair. If it isn't, the
function exits with a warning.
"""
writable_cat = None
if (catalog_path is not None) and (not os.path.exists(catalog_path)):
return
try:
writable_cat = get_catalog(catalog_path,'w')
except:
print('warning: unable to repair catalog entry\n %s\n in\n %s' % \
(code,catalog_path))
# shelve doesn't guarantee flushing, so it's safest to explicitly
# close the catalog
writable_cat.close()
return
if code in writable_cat:
print('repairing catalog by removing key')
del writable_cat[code]
# it is possible that the path key doesn't exist (if the function
# registered was a built-in function), so we have to check if the path
# exists before arbitrarily deleting it.
path_key = self.path_key(code)
if path_key in writable_cat:
del writable_cat[path_key]
writable_cat.close()
def get_functions_fast(self,code):
""" Return list of functions for code from the cache.
Return an empty list if the code entry is not found.
"""
return self.cache.get(code,[])
def get_functions(self,code,module_dir=None):
""" Return the list of functions associated with this code fragment.
The cache is first searched for the function. If an entry
in the cache is not found, then catalog files on disk are
searched for the entry. This is slooooow, but only happens
once per code object. All the functions found in catalog files
on a cache miss are loaded into the cache to speed up future calls.
The search order is as follows:
1. user specified path (from catalog initialization)
2. directories from the PYTHONCOMPILED environment variable
3. The temporary directory on your platform.
The path specified by module_dir will replace the 'MODULE'
place holder in the catalog search path. See build_search_order()
for more info on the search path.
"""
# Fast!! try cache first.
if code in self.cache:
return self.cache[code]
# 2. Slow!! read previously compiled functions from disk.
try:
self.set_module_directory(module_dir)
function_list = self.get_cataloged_functions(code)
# put function_list in cache to save future lookups.
if function_list:
self.cache[code] = function_list
# return function_list, empty or otherwise.
finally:
self.clear_module_directory()
return function_list
def add_function(self,code,function,module_dir=None):
""" Adds a function to the catalog.
The function is added to the cache as well as the first
writable file catalog found in the search path. If no
code entry exists in the cache, the on disk catalogs
are loaded into the cache and function is added to the
beginning of the function list.
The path specified by module_dir will replace the 'MODULE'
place holder in the catalog search path. See build_search_order()
for more info on the search path.
"""
# 1. put it in the cache.
if code in self.cache:
if function not in self.cache[code]:
self.cache[code].insert(0,function)
else:
# if it is in the cache, then it has also
# been persisted
return
else:
# Load functions and put this one up front
self.cache[code] = self.get_functions(code)
self.fast_cache(code,function)
# 2. Store the function entry to disk.
try:
self.set_module_directory(module_dir)
self.add_function_persistent(code,function)
finally:
self.clear_module_directory()
def add_function_persistent(self,code,function):
""" Store the code->function relationship to disk.
Two pieces of information are needed for loading functions
from disk -- the function pickle (which conveniently stores
the module name, etc.) and the path to its module's directory.
The latter is needed so that the function can be loaded no
matter what the user's Python path is.
"""
# add function to data in first writable catalog
mode = 'c' # create if doesn't exist, otherwise, use existing
cat_dir = self.get_writable_dir()
cat = get_catalog(cat_dir,mode)
if cat is None:
cat_dir = default_dir()
cat = get_catalog(cat_dir,mode)
if cat is None:
cat_dir = default_dir()
cat_file = catalog_path(cat_dir)
print('problems with default catalog -- removing')
import glob
files = glob.glob(cat_file+'*')
for f in files:
os.remove(f)
cat = get_catalog(cat_dir,mode)
if cat is None:
raise ValueError('Failed to access a catalog for storing functions')
# Prabhu was getting some corrupt catalog errors. I'll put a try/except
# to protect against this, but should really try and track down the issue.
function_list = [function]
try:
function_list = function_list + cat.get(code,[])
except pickle.UnpicklingError:
pass
cat[code] = function_list
# now add needed path information for loading function
module = getmodule(function)
try:
# built in modules don't have the __file__ extension, so this
# will fail. Just pass in this case since path additions aren't
# needed for built-in modules.
mod_path,f=os.path.split(os.path.abspath(module.__file__))
pkey = self.path_key(code)
cat[pkey] = [mod_path] + cat.get(pkey,[])
except:
pass
cat.close()
def fast_cache(self,code,function):
""" Move function to the front of the cache entry for code
If future calls to the function have the same type signature,
this will speed up access significantly because the first
function call is correct.
Note: The cache added to the inline_tools module is significantly
faster than always calling get_functions, so this isn't
as necessary as it used to be. Still, it's probably worth
doing.
"""
try:
if self.cache[code][0] == function:
return
except: # KeyError, IndexError
pass
try:
self.cache[code].remove(function)
except ValueError:
pass
# put new function at the beginning of the list to search.
self.cache[code].insert(0,function)
| ./CrossVul/dataset_final_sorted/CWE-269/py/bad_5731_0 |
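The CWE-269 weakness in the file above is that default_dir() and friends will reuse a pre-existing, possibly attacker-created directory under the shared temp dir, from which pickled catalogs are later loaded. A minimal sketch of the kind of ownership/permission test the fixed variant below builds toward (hypothetical helper name; Unix-only because of os.getuid):

import os
import stat

def _dir_is_trustworthy(path):
    # Trust a cache directory only if we own it and nobody else can
    # write to it; a group- or world-writable directory could have a
    # malicious pickled catalog planted in it.
    st = os.stat(path)
    return (st.st_uid == os.getuid()
            and not (st.st_mode & (stat.S_IWGRP | stat.S_IWOTH)))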
crossvul-python_data_good_5731_0 | """ Track relationships between compiled extension functions & code fragments
catalog keeps track of which compiled(or even standard) functions are
related to which code fragments. It also stores these relationships
to disk so they are remembered between Python sessions. When
a = 1
compiler.inline('printf("printed from C: %d",a);',['a'] )
is called, inline() first looks to see if it has seen the code
'printf("printed from C");' before. If not, it calls
catalog.get_functions('printf("printed from C: %d", a);')
which returns a list of all the function objects that have been compiled
for the code fragment. Multiple functions can occur because the code
could be compiled for different types for 'a' (although not likely in
this case). The catalog first looks in its cache and quickly returns
a list of the functions if possible. If the cache lookup fails, it then
looks through possibly multiple catalog files on disk and fills its
cache with all the functions that match the code fragment.
In case where the code fragment hasn't been compiled, inline() compiles
the code and then adds it to the catalog:
function = <code to compile function>
catalog.add_function('printf("printed from C: %d", a);',function)
add_function() adds the function to the front of the cache. The function,
along with the path information to its module, is also stored in a
persistent catalog for future use by later Python sessions.
"""
from __future__ import absolute_import, print_function
import os
import sys
import stat
import pickle
import socket
import tempfile
try:
import dbhash
import shelve
dumb = 0
except ImportError:
from . import _dumb_shelve as shelve
dumb = 1
#For testing...
#import scipy.io.dumb_shelve as shelve
#dumb = 1
#import shelve
#dumb = 0
def getmodule(object):
""" Discover the name of the module where object was defined.
This is an augmented version of inspect.getmodule that can discover
the parent module for extension functions.
"""
import inspect
value = inspect.getmodule(object)
if value is None:
# walk through all modules looking for the function
for name,mod in sys.modules.items():
# try except used because of some comparison failures
# in wxPoint code. Need to review this
try:
if mod and object in mod.__dict__.values():
value = mod
# if it is a built-in module, keep looking to see
# if a non-builtin also has it. Otherwise quit and
# consider the module found. (ain't perfect, but will
# have to do for now).
if str(mod) not in '(built-in)':
break
except (TypeError, KeyError, ImportError):
pass
return value
def expr_to_filename(expr):
""" Convert an arbitrary expr string to a valid file name.
The name is based on the md5 checksum of the string. Something
a little more human-readable would be nice, but the computer
doesn't seem to care.
"""
import scipy.weave.md5_load as md5
base = 'sc_'
return base + md5.new(expr).hexdigest()
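# Illustrative sketch (not part of the original module): the catalog keys
# files purely on content, so the same code fragment maps to the same base
# name in every session. A rough standalone equivalent using hashlib
# (md5_load is just a thin md5/hashlib compatibility shim):
def _expr_to_filename_sketch(expr):
import hashlib
if not isinstance(expr, bytes):
expr = expr.encode('utf8')
return 'sc_' + hashlib.md5(expr).hexdigest()
# e.g. _expr_to_filename_sketch('printf("printed from C: %d", a);')
# -> 'sc_' followed by 32 hex digits, stable across sessions and machines.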
def unique_file(d,expr):
""" Generate a unqiue file name based on expr in directory d
This is meant for use with building extension modules, so
a file name is considered unique if none of the following
extension '.cpp','.o','.so','module.so','.py', or '.pyd'
exists in directory d. The fully qualified path to the
new name is returned. You'll need to append your own
extension to it before creating files.
"""
files = os.listdir(d)
#base = 'scipy_compile'
base = expr_to_filename(expr)
for i in xrange(1000000):
fname = base + repr(i)
if not (fname+'.cpp' in files or
fname+'.o' in files or
fname+'.so' in files or
fname+'module.so' in files or
fname+'.py' in files or
fname+'.pyd' in files):
break
return os.path.join(d,fname)
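# Illustrative example (hypothetical directory): in an empty directory the
# very first candidate wins, since fname = base + repr(0):
# unique_file('/tmp/work', 'printf("%d", a);')
# -> '/tmp/work/sc_<md5 hex>0' -- append '.cpp', '.so', etc. yourself.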
def is_writable(dir):
"""Determine whether a given directory is writable in a portable manner.
Parameters
----------
dir : str
A string representing a path to a directory on the filesystem.
Returns
-------
res : bool
True or False.
"""
if not os.path.isdir(dir):
return False
# Do NOT use a hardcoded name here due to the danger from race conditions
# on NFS when multiple processes are accessing the same base directory in
# parallel. We use both hostname and process id for the prefix in an
# attempt to ensure that there can really be no name collisions (tempfile
# appends 6 random chars to this prefix).
prefix = 'dummy_%s_%s_' % (socket.gethostname(),os.getpid())
try:
tmp = tempfile.TemporaryFile(prefix=prefix,dir=dir)
except OSError:
return False
# The underlying file is destroyed upon closing the file object (under
# *nix, it was unlinked at creation time)
tmp.close()
return True
def whoami():
"""return a string identifying the user."""
return os.environ.get("USER") or os.environ.get("USERNAME") or "unknown"
def _create_dirs(path):
""" create provided path, ignore errors """
try:
os.makedirs(path, mode=0o700)
except OSError:
pass
def default_dir_posix(tmp_dir=None):
"""
Create or find the default catalog store for posix systems
The 'tmp_dir' argument exists only to make this function easy to test.
"""
path_candidates = []
python_name = "python%d%d_compiled" % tuple(sys.version_info[:2])
if tmp_dir:
home_dir = tmp_dir
else:
home_dir = os.path.expanduser('~')
tmp_dir = tmp_dir or tempfile.gettempdir()
home_temp_dir_name = '.' + python_name
home_temp_dir = os.path.join(home_dir, home_temp_dir_name)
path_candidates.append(home_temp_dir)
temp_dir_name = repr(os.getuid()) + '_' + python_name
temp_dir_path = os.path.join(tmp_dir, temp_dir_name)
path_candidates.append(temp_dir_path)
for path in path_candidates:
_create_dirs(path)
if check_dir(path):
return path
# since we got here, neither dir is useful
tmp_dir_path = find_valid_temp_dir(temp_dir_name, tmp_dir)
if not tmp_dir_path:
tmp_dir_path = create_temp_dir(temp_dir_name, tmp_dir=tmp_dir)
return tmp_dir_path
def default_dir_win(tmp_dir=None):
"""
Create or find the default catalog store for Windows systems
The 'tmp_dir' argument exists only to make this function easy to test.
"""
def create_win_temp_dir(prefix, inner_dir=None, tmp_dir=None):
"""
create a temp dir starting with 'prefix' in 'tmp_dir' (or in
tempfile.gettempdir() if 'tmp_dir' is not given); if 'inner_dir'
is specified, it is created inside the new directory
"""
tmp_dir_path = find_valid_temp_dir(prefix, tmp_dir)
if tmp_dir_path:
if inner_dir:
tmp_dir_path = os.path.join(tmp_dir_path, inner_dir)
if not os.path.isdir(tmp_dir_path):
os.mkdir(tmp_dir_path, 0o700)
else:
tmp_dir_path = create_temp_dir(prefix, inner_dir, tmp_dir)
return tmp_dir_path
python_name = "python%d%d_compiled" % tuple(sys.version_info[:2])
tmp_dir = tmp_dir or tempfile.gettempdir()
temp_dir_name = "%s" % whoami()
temp_root_dir = os.path.join(tmp_dir, temp_dir_name)
temp_dir_path = os.path.join(temp_root_dir, python_name)
_create_dirs(temp_dir_path)
if check_dir(temp_dir_path) and check_dir(temp_root_dir):
return temp_dir_path
else:
if check_dir(temp_root_dir):
return create_win_temp_dir(python_name, tmp_dir=temp_root_dir)
else:
return create_win_temp_dir(temp_dir_name, python_name, tmp_dir)
def default_dir():
""" Return a default location to store compiled files and catalogs.
XX is the Python version number in all paths listed below
On windows, the default location is the temporary directory
returned by gettempdir()/pythonXX.
On Unix, ~/.pythonXX_compiled is the default location. If it doesn't
exist, it is created. The directory is marked rwx------.
If for some reason it isn't possible to build a default directory
in the user's home, /tmp/<uid>_pythonXX_compiled is used. If it
doesn't exist, it is created. The directory is marked rwx------
to try and keep people from being able to sneak a bad module
in on you. If the directory already exists in /tmp/ and is not
secure, a new one is created.
"""
# Use a cached value for fast return if possible
if hasattr(default_dir, "cached_path") and \
check_dir(default_dir.cached_path):
return default_dir.cached_path
if sys.platform == 'win32':
path = default_dir_win()
else:
path = default_dir_posix()
# Cache the default dir path so that this function returns quickly after
# being called once (nothing in it should change after the first call)
default_dir.cached_path = path
return path
def check_dir(im_dir):
"""
Check if dir is safe; if it is, return True.
These checks make sense only on posix:
* directory has correct owner
* directory has correct permissions (0700)
* directory is not a symlink
"""
def check_is_dir():
return os.path.isdir(im_dir)
def check_permissions():
""" If on posix, permissions should be 0700. """
writable = is_writable(im_dir)
if sys.platform != 'win32':
try:
im_dir_stat = os.stat(im_dir)
except OSError:
return False
writable &= stat.S_IMODE(im_dir_stat.st_mode) == 0o0700
return writable
def check_ownership():
""" Intermediate dir owner should be same as owner of process. """
if sys.platform != 'win32':
try:
im_dir_stat = os.stat(im_dir)
except OSError:
return False
proc_uid = os.getuid()
return proc_uid == im_dir_stat.st_uid
return True
def check_is_symlink():
""" Check if intermediate dir is symlink. """
try:
return not os.path.islink(im_dir)
except OSError:
return False
checks = [check_is_dir, check_permissions,
check_ownership, check_is_symlink]
for check in checks:
if not check():
return False
return True
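# Illustrative sketch of producing a directory that check_dir() accepts,
# mirroring the mkdtemp usage elsewhere in this module ('parent' is a
# hypothetical writable location):
def _make_safe_dir_sketch(parent):
import tempfile
# mkdtemp creates the directory with mode 0700, owned by this process,
# and not as a symlink, so it passes all four checks above.
return tempfile.mkdtemp(prefix='py_compiled_', dir=parent)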
def create_temp_dir(prefix, inner_dir=None, tmp_dir=None):
"""
Create intermediate dirs <tmp>/<prefix+random suffix>/<inner_dir>/
argument 'tmp_dir' is used in unit tests
"""
if not tmp_dir:
tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
else:
tmp_dir_path = tempfile.mkdtemp(prefix=prefix, dir=tmp_dir)
if inner_dir:
tmp_dir_path = os.path.join(tmp_dir_path, inner_dir)
os.mkdir(tmp_dir_path, 0o700)
return tmp_dir_path
def intermediate_dir_prefix():
""" Prefix of root intermediate dir (<tmp>/<root_im_dir>). """
return "%s-%s-" % ("scipy", whoami())
def find_temp_dir(prefix, tmp_dir=None):
""" Find temp dirs in 'tmp_dir' starting with 'prefix'"""
matches = []
tmp_dir = tmp_dir or tempfile.gettempdir()
for tmp_file in os.listdir(tmp_dir):
if tmp_file.startswith(prefix):
matches.append(os.path.join(tmp_dir, tmp_file))
return matches
def find_valid_temp_dir(prefix, tmp_dir=None):
"""
Look for existing temp dirs.
If a suitable one is found, return it; otherwise return None.
"""
matches = find_temp_dir(prefix, tmp_dir)
for match in matches:
if check_dir(match):
# as soon as we find correct dir, we can stop searching
return match
def py_intermediate_dir():
"""
Name of intermediate dir for current python interpreter:
<temp dir>/<name>/pythonXY_intermediate/
"""
name = "python%d%d_intermediate" % tuple(sys.version_info[:2])
return name
def create_intermediate_dir(tmp_dir=None):
py_im_dir = py_intermediate_dir()
return create_temp_dir(intermediate_dir_prefix(), py_im_dir, tmp_dir)
def intermediate_dir(tmp_dir=None):
"""
Temporary directory for storing .cpp and .o files during builds.
First, try to find the dir and if it exists, verify it is safe.
Otherwise, create it.
"""
im_dir = find_valid_temp_dir(intermediate_dir_prefix(), tmp_dir)
py_im_dir = py_intermediate_dir()
if im_dir is None:
im_dir = create_intermediate_dir(tmp_dir)
else:
im_dir = os.path.join(im_dir, py_im_dir)
if not os.path.isdir(im_dir):
os.mkdir(im_dir, 0o700)
return im_dir
def default_temp_dir():
path = os.path.join(default_dir(),'temp')
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
if not is_writable(path):
print('warning: default directory is not write accessible.')
print('default:', path)
return path
def os_dependent_catalog_name():
""" Generate catalog name dependent on OS and Python version being used.
This allows multiple platforms to have catalog files in the
same directory without stepping on each other. For now, it
bases the name on the value returned by sys.platform and the
version of python being run. If this isn't enough to discriminate
on some platforms, we can try to add other info. It has
occurred to me that if we get fancy enough to optimize for different
architectures, then chip type might be added to the catalog name also.
"""
version = '%d%d' % sys.version_info[:2]
return sys.platform+version+'compiled_catalog'
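# Illustrative example: under Python 2.7 on Linux (sys.platform == 'linux2')
# this returns 'linux227compiled_catalog', so catalogs from different
# platforms can share one NFS-mounted directory without colliding.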
def catalog_path(module_path):
""" Return the full path name for the catalog file in the given directory.
module_path can either be a file name or a path name. If it is a
file name, the catalog file name in its parent directory is returned.
If it is a directory, the catalog file in that directory is returned.
If module_path doesn't exist, None is returned. Note though, that the
catalog file does *not* have to exist, only its parent. '~', shell
variables, and relative ('.' and '..') paths are all acceptable.
catalog file names are os dependent (based on sys.platform), so this
should support multiple platforms sharing the same disk space
(NFS mounts). See os_dependent_catalog_name() for more info.
"""
module_path = os.path.expanduser(module_path)
module_path = os.path.expandvars(module_path)
module_path = os.path.abspath(module_path)
if not os.path.exists(module_path):
catalog_file = None
elif not os.path.isdir(module_path):
module_path,dummy = os.path.split(module_path)
catalog_file = os.path.join(module_path,os_dependent_catalog_name())
else:
catalog_file = os.path.join(module_path,os_dependent_catalog_name())
return catalog_file
def get_catalog(module_path,mode='r'):
""" Return a function catalog (shelve object) from the path module_path
If module_path is a directory, the function catalog returned is
from that directory. If module_path is an actual module_name,
then the function catalog returned is from its parent directory.
mode uses the standard 'c' = create, 'n' = new, 'r' = read,
'w' = write file open modes available for anydbm databases.
Well... they should be. We're stuck with dumbdbm for now, and the
modes almost don't matter. We do some checking for 'r' mode, but that
is about it.
See catalog_path() for more information on module_path.
"""
if mode not in ['c','r','w','n']:
msg = " mode must be 'c', 'n', 'r', or 'w'. See anydbm for more info"
raise ValueError(msg)
catalog_file = catalog_path(module_path)
if (catalog_file is not None) \
and ((dumb and os.path.exists(catalog_file+'.dat')) \
or os.path.exists(catalog_file)):
sh = shelve.open(catalog_file,mode)
else:
if mode=='r':
sh = None
else:
sh = shelve.open(catalog_file,mode)
return sh
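# Illustrative usage sketch (hypothetical directory); the shelve object maps
# code fragments to lists of compiled functions:
# cat = get_catalog('/tmp/1000_python27_compiled', 'c')
# if cat is not None:
# cat['printf("%d", a);'] = [] # code fragment -> function list
# cat.close()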
class catalog(object):
""" Stores information about compiled functions both in cache and on disk.
catalog stores (code, list_of_function) pairs so that all the functions
that have been compiled for code are available for calling (usually in
inline or blitz).
catalog keeps a dictionary of previously accessed code values cached
for quick access. It also handles the looking up of functions compiled
in previously called Python sessions on disk in function catalogs.
catalog searches the directories in the PYTHONCOMPILED environment
variable in order, loading functions that correspond to the given code
fragment. A default directory is also searched for catalog functions.
On unix, the default directory is usually '~/.pythonxx_compiled' where
xx is the version of Python used. On windows, it is the directory
returned by tempfile.gettempdir(). Functions found in directories
closer to the front of the search path are guaranteed to be closer
to the front of the function list, so they will be called first. See
get_cataloged_functions() for more info on how the search order is
traversed.
Catalog also handles storing information about compiled functions to
a catalog. When writing this information, the first writable catalog
file in PYTHONCOMPILED path is used. If a writable catalog is not
found, it is written to the catalog in the default directory. This
directory should always be writable.
"""
def __init__(self,user_path_list=None):
""" Create a catalog for storing/searching for compiled functions.
user_path_list contains directories that should be searched
first for function catalogs. They will come before the path
entries in the PYTHONCOMPILED environment variable.
"""
if isinstance(user_path_list, str):
self.user_path_list = [user_path_list]
elif user_path_list:
self.user_path_list = user_path_list
else:
self.user_path_list = []
self.cache = {}
self.module_dir = None
self.paths_added = 0
# unconditionally append the default dir for auto-generated compiled
# extension modules, so that pickle.load()s don't fail.
sys.path.append(default_dir())
def set_module_directory(self,module_dir):
""" Set the path that will replace 'MODULE' in catalog searches.
You should call clear_module_directory() when you're finished
working with it.
"""
self.module_dir = module_dir
def get_module_directory(self):
""" Return the path used to replace the 'MODULE' in searches.
"""
return self.module_dir
def clear_module_directory(self):
""" Reset 'MODULE' path to None so that it is ignored in searches.
"""
self.module_dir = None
def get_environ_path(self):
""" Return list of paths from 'PYTHONCOMPILED' environment variable.
On Unix the path in PYTHONCOMPILED is a ':' separated list of
directories. On Windows, a ';' separated list is used.
"""
paths = []
if 'PYTHONCOMPILED' in os.environ:
path_string = os.environ['PYTHONCOMPILED']
paths = path_string.split(os.path.pathsep)
return paths
def build_search_order(self):
""" Returns a list of paths that are searched for catalogs.
Values specified in the catalog constructor are searched first,
then values found in the PYTHONCOMPILED environment variable.
The directory returned by default_dir() is always returned at
the end of the list.
There is a 'magic' path name called 'MODULE' that is replaced
by the directory defined by set_module_directory(). If the
module directory hasn't been set, 'MODULE' is ignored.
"""
paths = self.user_path_list + self.get_environ_path()
search_order = []
for path in paths:
if path == 'MODULE':
if self.module_dir:
search_order.append(self.module_dir)
else:
search_order.append(path)
search_order.append(default_dir())
return search_order
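# Illustrative example (hypothetical values): with
# user_path_list=['/opt/cat'], PYTHONCOMPILED='MODULE', and
# set_module_directory('/src/pkg'), this method returns
# ['/opt/cat', '/src/pkg', default_dir()].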
def get_catalog_files(self):
""" Returns catalog file list in correct search order.
Some of the catalog files may not currently exist.
However, all will be valid locations for a catalog
to be created (if you have write permission).
"""
files = map(catalog_path,self.build_search_order())
files = filter(lambda x: x is not None,files)
return files
def get_existing_files(self):
""" Returns all existing catalog file list in correct search order.
"""
files = self.get_catalog_files()
# open every stinking file to check if it exists.
# This is because anydbm doesn't provide a consistent naming
# convention across platforms for its files
existing_files = []
for file in files:
cat = get_catalog(os.path.dirname(file),'r')
if cat is not None:
existing_files.append(file)
cat.close()
# This is the non-portable (and much faster) old code
#existing_files = filter(os.path.exists,files)
return existing_files
def get_writable_file(self,existing_only=0):
""" Return the name of the first writable catalog file.
Its parent directory must also be writable. This is so that
compiled modules can be written to the same directory.
"""
# note: both file and its parent directory must be writeable
if existing_only:
files = self.get_existing_files()
else:
files = self.get_catalog_files()
# filter for (file exists and is writable) OR directory is writable
def file_test(x):
from os import access, F_OK, W_OK
return (access(x,F_OK) and access(x,W_OK) or
access(os.path.dirname(x),W_OK))
writable = filter(file_test,files)
if writable:
file = writable[0]
else:
file = None
return file
def get_writable_dir(self):
""" Return the parent directory of first writable catalog file.
The returned directory has write access.
"""
return os.path.dirname(self.get_writable_file())
def unique_module_name(self,code,module_dir=None):
""" Return full path to unique file name that in writable location.
The directory for the file is the first writable directory in
the catalog search path. The unique file name is derived from
the code fragment. If, module_dir is specified, it is used
to replace 'MODULE' in the search path.
"""
if module_dir is not None:
self.set_module_directory(module_dir)
try:
d = self.get_writable_dir()
finally:
if module_dir is not None:
self.clear_module_directory()
return unique_file(d, code)
def path_key(self,code):
""" Return key for path information for functions associated with code.
"""
return '__path__' + code
def configure_path(self,cat,code):
""" Add the python path for the given code to the sys.path
unconfigure_path() should be called as soon as possible after
imports associated with code are finished so that sys.path
is restored to normal.
"""
try:
paths = cat[self.path_key(code)]
self.paths_added = len(paths)
sys.path = paths + sys.path
except:
self.paths_added = 0
def unconfigure_path(self):
""" Restores sys.path to normal after calls to configure_path()
Remove the previously added paths from sys.path
"""
sys.path = sys.path[self.paths_added:]
self.paths_added = 0
def get_cataloged_functions(self,code):
""" Load all functions associated with code from catalog search path.
Sometimes there can be trouble loading a function listed in a
catalog file because the actual module that holds the function
has been moved or deleted. When this happens, that catalog file
is "repaired", meaning the entire entry for this function is
removed from the file. This only affects the catalog file that
has problems -- not the others in the search path.
The "repair" behavior may not be needed, but I'll keep it for now.
"""
mode = 'r'
cat = None
function_list = []
for path in self.build_search_order():
cat = get_catalog(path,mode)
if cat is not None and code in cat:
# set up the python path so that modules for this
# function can be loaded.
self.configure_path(cat,code)
try:
function_list += cat[code]
except: #SystemError and ImportError so far seen
# problems loading a function from the catalog. Try to
# repair the cause.
cat.close()
self.repair_catalog(path,code)
self.unconfigure_path()
if cat is not None:
# ensure that the catalog is properly closed
cat.close()
return function_list
def repair_catalog(self,catalog_path,code):
""" Remove entry for code from catalog_path
Occasionally catalog entries could get corrupted. An example
would be when a module that had functions in the catalog was
deleted or moved on the disk. The best current repair method is
just to trash the entire catalog entry for this piece of code.
This may lose function entries that are valid, but that's life.
catalog_path must be writable for repair. If it isn't, the
function exits with a warning.
"""
writable_cat = None
if (catalog_path is not None) and (not os.path.exists(catalog_path)):
return
try:
writable_cat = get_catalog(catalog_path,'w')
except:
print('warning: unable to repair catalog entry\n %s\n in\n %s' % \
(code,catalog_path))
# shelve doesn't guarantee flushing, so it's safest to explicitly
# close the catalog
writable_cat.close()
return
if code in writable_cat:
print('repairing catalog by removing key')
del writable_cat[code]
# it is possible that the path key doesn't exist (if the function
# registered was a built-in function), so we have to check if the path
# exists before arbitrarily deleting it.
path_key = self.path_key(code)
if path_key in writable_cat:
del writable_cat[path_key]
writable_cat.close()
def get_functions_fast(self,code):
""" Return list of functions for code from the cache.
Return an empty list if the code entry is not found.
"""
return self.cache.get(code,[])
def get_functions(self,code,module_dir=None):
""" Return the list of functions associated with this code fragment.
The cache is first searched for the function. If an entry
in the cache is not found, then catalog files on disk are
searched for the entry. This is slooooow, but only happens
once per code object. All the functions found in catalog files
on a cache miss are loaded into the cache to speed up future calls.
The search order is as follows:
1. user specified path (from catalog initialization)
2. directories from the PYTHONCOMPILED environment variable
3. The temporary directory on your platform.
The path specified by module_dir will replace the 'MODULE'
place holder in the catalog search path. See build_search_order()
for more info on the search path.
"""
# Fast!! try cache first.
if code in self.cache:
return self.cache[code]
# 2. Slow!! read previously compiled functions from disk.
try:
self.set_module_directory(module_dir)
function_list = self.get_cataloged_functions(code)
# put function_list in cache to save future lookups.
if function_list:
self.cache[code] = function_list
# return function_list, empty or otherwise.
finally:
self.clear_module_directory()
return function_list
def add_function(self,code,function,module_dir=None):
""" Adds a function to the catalog.
The function is added to the cache as well as the first
writable file catalog found in the search path. If no
code entry exists in the cache, the on disk catalogs
are loaded into the cache and function is added to the
beginning of the function list.
The path specified by module_dir will replace the 'MODULE'
place holder in the catalog search path. See build_search_order()
for more info on the search path.
"""
# 1. put it in the cache.
if code in self.cache:
if function not in self.cache[code]:
self.cache[code].insert(0,function)
else:
# if it is in the cache, then it has also
# been persisted
return
else:
# Load functions and put this one up front
self.cache[code] = self.get_functions(code)
self.fast_cache(code,function)
# 2. Store the function entry to disk.
try:
self.set_module_directory(module_dir)
self.add_function_persistent(code,function)
finally:
self.clear_module_directory()
def add_function_persistent(self,code,function):
""" Store the code->function relationship to disk.
Two pieces of information are needed for loading functions
from disk -- the function pickle (which conveniently stores
the module name, etc.) and the path to its module's directory.
The latter is needed so that the function can be loaded no
matter what the user's Python path is.
"""
# add function to data in first writable catalog
mode = 'c' # create if doesn't exist, otherwise, use existing
cat_dir = self.get_writable_dir()
cat = get_catalog(cat_dir,mode)
if cat is None:
cat_dir = default_dir()
cat = get_catalog(cat_dir,mode)
if cat is None:
cat_dir = default_dir()
cat_file = catalog_path(cat_dir)
print('problems with default catalog -- removing')
import glob
files = glob.glob(cat_file+'*')
for f in files:
os.remove(f)
cat = get_catalog(cat_dir,mode)
if cat is None:
raise ValueError('Failed to access a catalog for storing functions')
# Prabhu was getting some corrupt catalog errors. I'll put a try/except
# to protect against this, but should really try and track down the issue.
function_list = [function]
try:
function_list = function_list + cat.get(code,[])
except pickle.UnpicklingError:
pass
cat[code] = function_list
# now add needed path information for loading function
module = getmodule(function)
try:
# built-in modules don't have a __file__ attribute, so this
# will fail. Just pass in that case, since path additions aren't
# needed for built-in modules.
mod_path,f=os.path.split(os.path.abspath(module.__file__))
pkey = self.path_key(code)
cat[pkey] = [mod_path] + cat.get(pkey,[])
except:
pass
cat.close()
def fast_cache(self,code,function):
""" Move function to the front of the cache entry for code
If future calls to the function have the same type signature,
this will speed up access significantly because the first
function call is correct.
Note: The cache added to the inline_tools module is significantly
faster than always calling get_functions, so this isn't
as necessary as it used to be. Still, it's probably worth
doing.
"""
try:
if self.cache[code][0] == function:
return
except (KeyError, IndexError):
pass
try:
self.cache[code].remove(function)
except ValueError:
pass
# put new function at the beginning of the list to search.
self.cache[code].insert(0,function)
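# Illustrative end-to-end sketch of the intended call pattern (commented out
# because real callers pass picklable, compiled extension functions; a local
# stand-in would not survive the shelve pickling in add_function_persistent()):
# c = catalog()
# code = 'printf("printed from C: %d", a);'
# c.add_function(code, compiled_func) # cache + persistent catalog
# funcs = c.get_functions(code) # [compiled_func, ...]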
| ./CrossVul/dataset_final_sorted/CWE-269/py/good_5731_0 |
crossvul-python_data_good_4366_4 | """
Custom Authenticator to use GitLab OAuth with JupyterHub
"""
import json
import os
import re
import sys
import warnings
from urllib.parse import quote
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.escape import url_escape
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set, CUnicode, Unicode, default, observe
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}".format(access_token),
}
class GitLabOAuthenticator(OAuthenticator):
# see gitlab_scopes.md for details about scope config
# set scopes via config, e.g.
# c.GitLabOAuthenticator.scope = ['read_user']
_deprecated_oauth_aliases = {
"gitlab_group_whitelist": ("allowed_gitlab_groups", "0.12.0"),
"gitlab_project_id_whitelist": ("allowed_project_ids", "0.12.0"),
**OAuthenticator._deprecated_oauth_aliases,
}
login_service = "GitLab"
client_id_env = 'GITLAB_CLIENT_ID'
client_secret_env = 'GITLAB_CLIENT_SECRET'
gitlab_url = Unicode("https://gitlab.com", config=True)
@default("gitlab_url")
def _default_gitlab_url(self):
"""get default gitlab url from env"""
gitlab_url = os.getenv('GITLAB_URL')
gitlab_host = os.getenv('GITLAB_HOST')
if not gitlab_url and gitlab_host:
warnings.warn(
'Use of GITLAB_HOST might be deprecated in the future. '
'Rename GITLAB_HOST environment variable to GITLAB_URL.',
PendingDeprecationWarning,
)
if gitlab_host.startswith(('https:', 'http:')):
gitlab_url = gitlab_host
else:
# Handles the common mistake of users setting GITLAB_HOST
# without a protocol specification.
gitlab_url = 'https://{0}'.format(gitlab_host)
warnings.warn(
'The https:// prefix has been added to GITLAB_HOST.'
'Set GITLAB_URL="{0}" instead.'.format(gitlab_host)
)
# default to gitlab.com
if not gitlab_url:
gitlab_url = 'https://gitlab.com'
return gitlab_url
gitlab_api_version = CUnicode('4', config=True)
@default('gitlab_api_version')
def _gitlab_api_version_default(self):
return os.environ.get('GITLAB_API_VERSION') or '4'
gitlab_api = Unicode(config=True)
@default("gitlab_api")
def _default_gitlab_api(self):
return '%s/api/v%s' % (self.gitlab_url, self.gitlab_api_version)
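# Illustrative example (hypothetical config values):
# c.GitLabOAuthenticator.gitlab_url = "https://gitlab.example.com"
# c.GitLabOAuthenticator.gitlab_api_version = "4"
# -> gitlab_api == "https://gitlab.example.com/api/v4"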
@default("authorize_url")
def _authorize_url_default(self):
return "%s/oauth/authorize" % self.gitlab_url
@default("token_url")
def _token_url_default(self):
return "%s/oauth/access_token" % self.gitlab_url
gitlab_group_whitelist = Set(help="Deprecated, use `GitLabOAuthenticator.allowed_gitlab_groups`", config=True,)
allowed_gitlab_groups = Set(
config=True, help="Automatically allow members of selected groups"
)
gitlab_project_id_whitelist = Set(help="Deprecated, use `GitLabOAuthenticator.allowed_project_ids`", config=True,)
allowed_project_ids = Set(
config=True,
help="Automatically allow members with Developer access to selected project ids",
)
gitlab_version = None
async def authenticate(self, handler, data=None):
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitLab Access Token
#
# See: https://github.com/gitlabhq/gitlabhq/blob/master/doc/api/oauth2.md
# GitLab specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
code=code,
grant_type="authorization_code",
redirect_uri=self.get_callback_url(handler),
)
validate_server_cert = self.validate_server_cert
url = url_concat("%s/oauth/token" % self.gitlab_url, params)
req = HTTPRequest(
url,
method="POST",
headers={"Accept": "application/json"},
validate_cert=validate_server_cert,
body='', # Body is required for a POST...
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# memoize gitlab version for class lifetime
if self.gitlab_version is None:
self.gitlab_version = await self._get_gitlab_version(access_token)
self.member_api_variant = 'all/' if self.gitlab_version >= [12, 4] else ''
# Determine who the logged in user is
req = HTTPRequest(
"%s/user" % self.gitlab_api,
method="GET",
validate_cert=validate_server_cert,
headers=_api_headers(access_token),
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
user_id = resp_json["id"]
is_admin = resp_json.get("is_admin", False)
# Check if user is a member of any allowed groups or projects.
# These checks are performed here, as it requires `access_token`.
user_in_group = user_in_project = False
is_group_specified = is_project_id_specified = False
if self.allowed_gitlab_groups:
is_group_specified = True
user_in_group = await self._check_membership_allowed_groups(user_id, access_token)
# We skip project_id check if user is in allowed group.
if self.allowed_project_ids and not user_in_group:
is_project_id_specified = True
user_in_project = await self._check_membership_allowed_project_ids(
user_id, access_token
)
no_config_specified = not (is_group_specified or is_project_id_specified)
if (
(is_group_specified and user_in_group)
or (is_project_id_specified and user_in_project)
or no_config_specified
):
return {
'name': username,
'auth_state': {'access_token': access_token, 'gitlab_user': resp_json},
}
else:
self.log.warning("%s not in group or project allowed list", username)
return None
async def _get_gitlab_version(self, access_token):
url = '%s/version' % self.gitlab_api
req = HTTPRequest(
url,
method="GET",
headers=_api_headers(access_token),
validate_cert=self.validate_server_cert,
)
resp = await AsyncHTTPClient().fetch(req, raise_error=True)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
version_strings = resp_json['version'].split('-')[0].split('.')[:3]
version_ints = list(map(int, version_strings))
return version_ints
async def _check_membership_allowed_groups(self, user_id, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check if user is a member of any group in the allowed list
for group in map(url_escape, self.allowed_gitlab_groups):
url = "%s/groups/%s/members/%s%d" % (
self.gitlab_api,
quote(group, safe=''),
self.member_api_variant,
user_id,
)
req = HTTPRequest(url, method="GET", headers=headers)
resp = await http_client.fetch(req, raise_error=False)
if resp.code == 200:
return True # user _is_ in group
return False
async def _check_membership_allowed_project_ids(self, user_id, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check if user has developer access to any project in the allowed list
for project in self.allowed_project_ids:
url = "%s/projects/%s/members/%s%d" % (
self.gitlab_api,
project,
self.member_api_variant,
user_id,
)
req = HTTPRequest(url, method="GET", headers=headers)
resp = await http_client.fetch(req, raise_error=False)
if resp.body:
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_level = resp_json.get('access_level', 0)
# We only allow access level Developer and above
# Reference: https://docs.gitlab.com/ee/api/members.html
if resp.code == 200 and access_level >= 30:
return True
return False
class LocalGitLabOAuthenticator(LocalAuthenticator, GitLabOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4366_4 |
crossvul-python_data_bad_4366_6 | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
from urllib.parse import quote, urlparse
import uuid
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.log import app_log
from jupyterhub.handlers import BaseHandler
from jupyterhub.auth import Authenticator
from jupyterhub.utils import url_path_join
from traitlets import Unicode, Bool, List, Dict, default, observe
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
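# Note for readers: the two helpers above are inverses for JSON-able dicts:
# s = _serialize_state({'state_id': 'abc', 'next_url': '/hub/home'})
# _deserialize_state(s) == {'state_id': 'abc', 'next_url': '/hub/home'}
# and _deserialize_state() degrades to {} on malformed input instead of
# raising, which is why callers can use .get('next_url') unconditionally.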
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
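# Illustrative example: a crafted ?next=https://evil.example/x is reduced
# to the absolute path '/x' by the _replace(scheme='', netloc='', ...)
# call above, so the post-login redirect can never leave the current host.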
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_pre_08 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
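# Illustrative example (hypothetical host): with oauth_callback_url unset
# and a request to https://hub.example.com under base_url /hub/, this
# guesses "https://hub.example.com/hub/oauth_callback" via
# guess_callback_uri().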
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
def _deprecated_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4366_6 |
crossvul-python_data_good_4211_1 | import asyncio
import contextlib
import logging
from datetime import datetime, timedelta, timezone
from typing import Dict, List, Optional, Tuple, Union
import discord
from redbot.core import commands, i18n, checks, modlog
from redbot.core.commands import UserInputOptional
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import (
pagify,
humanize_number,
bold,
humanize_list,
format_perms_list,
)
from redbot.core.utils.mod import get_audit_reason
from .abc import MixinMeta
from .converters import RawUserIds
from .utils import is_allowed_by_hierarchy
log = logging.getLogger("red.mod")
_ = i18n.Translator("Mod", __file__)
class KickBanMixin(MixinMeta):
"""
Kick and ban commands and tasks go here.
"""
@staticmethod
async def get_invite_for_reinvite(ctx: commands.Context, max_age: int = 86400):
"""Handles the reinvite logic for getting an invite
to send the newly unbanned user
:returns: :class:`Invite`"""
guild = ctx.guild
my_perms: discord.Permissions = guild.me.guild_permissions
if my_perms.manage_guild or my_perms.administrator:
if "VANITY_URL" in guild.features:
# guild has a vanity url so use it as the one to send
return await guild.vanity_invite()
invites = await guild.invites()
else:
invites = []
for inv in invites: # Loop through the invites for the guild
if not (inv.max_uses or inv.max_age or inv.temporary):
# Invite is for the guild's default channel,
# has unlimited uses, doesn't expire, and
# doesn't grant temporary membership
# (i.e. they won't be kicked on disconnect)
return inv
else: # No existing invite found that is valid
channels_and_perms = zip(
guild.text_channels, map(guild.me.permissions_in, guild.text_channels)
)
channel = next(
(channel for channel, perms in channels_and_perms if perms.create_instant_invite),
None,
)
if channel is None:
return
try:
# Create invite that expires after max_age
return await channel.create_invite(max_age=max_age)
except discord.HTTPException:
return
@staticmethod
async def _voice_perm_check(
ctx: commands.Context, user_voice_state: Optional[discord.VoiceState], **perms: bool
) -> bool:
"""Check if the bot and user have sufficient permissions for voicebans.
This also verifies that the user's voice state and connected
channel are not ``None``.
Returns
-------
bool
``True`` if the permissions are sufficient and the user has
a valid voice state.
"""
if user_voice_state is None or user_voice_state.channel is None:
await ctx.send(_("That user is not in a voice channel."))
return False
voice_channel: discord.VoiceChannel = user_voice_state.channel
required_perms = discord.Permissions()
required_perms.update(**perms)
if not voice_channel.permissions_for(ctx.me) >= required_perms:
await ctx.send(
_("I require the {perms} permission(s) in that user's channel to do that.").format(
perms=format_perms_list(required_perms)
)
)
return False
if (
ctx.permission_state is commands.PermState.NORMAL
and not voice_channel.permissions_for(ctx.author) >= required_perms
):
await ctx.send(
_(
"You must have the {perms} permission(s) in that user's channel to use this "
"command."
).format(perms=format_perms_list(required_perms))
)
return False
return True
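# Illustrative usage sketch for a hypothetical voice-moderation command;
# the **perms kwargs become a discord.Permissions update, exactly as above:
# if not await self._voice_perm_check(ctx, member.voice, mute_members=True):
# return # an explanatory message was already sent by the check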
async def ban_user(
self,
user: Union[discord.Member, discord.User, discord.Object],
ctx: commands.Context,
days: int = 0,
reason: str = None,
create_modlog_case=False,
) -> Tuple[bool, str]:
author = ctx.author
guild = ctx.guild
removed_temp = False
if not (0 <= days <= 7):
return False, _("Invalid days. Must be between 0 and 7.")
if isinstance(user, discord.Member):
if author == user:
return (
False,
_("I cannot let you do that. Self-harm is bad {}").format("\N{PENSIVE FACE}"),
)
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
return (
False,
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
),
)
elif guild.me.top_role <= user.top_role or user == guild.owner:
return False, _("I cannot do that due to Discord hierarchy rules.")
toggle = await self.config.guild(guild).dm_on_kickban()
if toggle:
with contextlib.suppress(discord.HTTPException):
em = discord.Embed(
title=bold(_("You have been banned from {guild}.").format(guild=guild))
)
em.add_field(
name=_("**Reason**"),
value=reason if reason is not None else _("No reason was given."),
inline=False,
)
await user.send(embed=em)
ban_type = "ban"
else:
tempbans = await self.config.guild(guild).current_tempbans()
ban_list = [ban.user.id for ban in await guild.bans()]
if user.id in ban_list:
if user.id in tempbans:
async with self.config.guild(guild).current_tempbans() as tempbans:
tempbans.remove(user.id)
removed_temp = True
else:
return (
False,
_("User with ID {user_id} is already banned.").format(user_id=user.id),
)
ban_type = "hackban"
audit_reason = get_audit_reason(author, reason)
queue_entry = (guild.id, user.id)
if removed_temp:
log.info(
"{}({}) upgraded the tempban for {} to a permaban.".format(
author.name, author.id, user.id
)
)
success_message = _(
"User with ID {user_id} was upgraded from a temporary to a permanent ban."
).format(user_id=user.id)
else:
username = user.name if hasattr(user, "name") else "Unknown"
try:
await guild.ban(user, reason=audit_reason, delete_message_days=days)
log.info(
"{}({}) {}ned {}({}), deleting {} days worth of messages.".format(
author.name, author.id, ban_type, username, user.id, str(days)
)
)
success_message = _("Done. That felt good.")
except discord.Forbidden:
return False, _("I'm not allowed to do that.")
except discord.NotFound:
return False, _("User with ID {user_id} not found").format(user_id=user.id)
except Exception as e:
log.exception(
"{}({}) attempted to {} {}({}), but an error occurred.".format(
author.name, author.id, ban_type, username, user.id
)
)
return False, _("An unexpected error occurred.")
if create_modlog_case:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
ban_type,
user,
author,
reason,
until=None,
channel=None,
)
return True, success_message
async def check_tempban_expirations(self):
while self == self.bot.get_cog("Mod"):
async for guild in AsyncIter(self.bot.guilds, steps=100):
if not guild.me.guild_permissions.ban_members:
continue
if await self.bot.cog_disabled_in_guild(self, guild):
continue
async with self.config.guild(guild).current_tempbans() as guild_tempbans:
for uid in guild_tempbans.copy():
unban_time = datetime.fromtimestamp(
await self.config.member_from_ids(guild.id, uid).banned_until(),
timezone.utc,
)
if datetime.now(timezone.utc) > unban_time: # Time to unban the user
queue_entry = (guild.id, uid)
try:
await guild.unban(
discord.Object(id=uid), reason=_("Tempban finished")
)
except discord.NotFound:
# user is not banned anymore
guild_tempbans.remove(uid)
except discord.HTTPException as e:
# 50013: Missing permissions error code or 403: Forbidden status
if e.code == 50013 or e.status == 403:
log.info(
f"Failed to unban ({uid}) user from "
f"{guild.name}({guild.id}) guild due to permissions."
)
break # skip the rest of this guild
log.info(f"Failed to unban member: error code: {e.code}")
else:
# user unbanned successfully
guild_tempbans.remove(uid)
await asyncio.sleep(60)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(kick_members=True)
@checks.admin_or_permissions(kick_members=True)
async def kick(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Kick a user.
If a reason is specified, it will be the reason that shows up
in the audit log.
"""
author = ctx.author
guild = ctx.guild
if author == user:
await ctx.send(
_("I cannot let you do that. Self-harm is bad {emoji}").format(
emoji="\N{PENSIVE FACE}"
)
)
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
elif ctx.guild.me.top_role <= user.top_role or user == ctx.guild.owner:
await ctx.send(_("I cannot do that due to Discord hierarchy rules."))
return
audit_reason = get_audit_reason(author, reason)
toggle = await self.config.guild(guild).dm_on_kickban()
if toggle:
with contextlib.suppress(discord.HTTPException):
em = discord.Embed(
title=bold(_("You have been kicked from {guild}.").format(guild=guild))
)
em.add_field(
name=_("**Reason**"),
value=reason if reason is not None else _("No reason was given."),
inline=False,
)
await user.send(embed=em)
try:
await guild.kick(user, reason=audit_reason)
log.info("{}({}) kicked {}({})".format(author.name, author.id, user.name, user.id))
except discord.errors.Forbidden:
await ctx.send(_("I'm not allowed to do that."))
except Exception as e:
log.exception(
"{}({}) attempted to kick {}({}), but an error occurred.".format(
author.name, author.id, user.name, user.id
)
)
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"kick",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("Done. That felt good."))
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def ban(
self,
ctx: commands.Context,
user: Union[discord.Member, RawUserIds],
days: Optional[int] = None,
*,
reason: str = None,
):
"""Ban a user from this server and optionally delete days of messages.
A user ID should be provided if the user is not a member of this server.
If days is not a number, it's treated as the first word of the reason.
Minimum 0 days, maximum 7. If not specified, defaultdays setting will be used instead."""
author = ctx.author
guild = ctx.guild
if days is None:
days = await self.config.guild(guild).default_days()
if isinstance(user, int):
user = self.bot.get_user(user) or discord.Object(id=user)
success_, message = await self.ban_user(
user=user, ctx=ctx, days=days, reason=reason, create_modlog_case=True
)
await ctx.send(message)
@commands.command(aliases=["hackban"])
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def massban(
self,
ctx: commands.Context,
user_ids: commands.Greedy[RawUserIds],
days: Optional[int] = None,
*,
reason: str = None,
):
"""Mass bans user(s) from the server.
User IDs need to be provided in order to ban
using this command."""
banned = []
errors = {}
upgrades = []
async def show_results():
text = _("Banned {num} users from the server.").format(
num=humanize_number(len(banned))
)
if errors:
text += _("\nErrors:\n")
text += "\n".join(errors.values())
if upgrades:
text += _(
"\nFollowing user IDs have been upgraded from a temporary to a permanent ban:\n"
)
text += humanize_list(upgrades)
for p in pagify(text):
await ctx.send(p)
def remove_processed(ids):
return [_id for _id in ids if _id not in banned and _id not in errors]
user_ids = list(set(user_ids)) # No dupes
author = ctx.author
guild = ctx.guild
if not user_ids:
await ctx.send_help()
return
if days is None:
days = await self.config.guild(guild).default_days()
if not (0 <= days <= 7):
await ctx.send(_("Invalid days. Must be between 0 and 7."))
return
if not guild.me.guild_permissions.ban_members:
return await ctx.send(_("I lack the permissions to do this."))
tempbans = await self.config.guild(guild).current_tempbans()
ban_list = await guild.bans()
for entry in ban_list:
for user_id in user_ids:
if entry.user.id == user_id:
if user_id in tempbans:
# We need to check if a user is tempbanned here because otherwise they won't be processed later on.
continue
else:
errors[user_id] = _("User with ID {user_id} is already banned.").format(
user_id=user_id
)
user_ids = remove_processed(user_ids)
if not user_ids:
await show_results()
return
# We need to check here, if any of the users isn't a member and if they are,
# we need to use our `ban_user()` method to do hierarchy checks.
members: Dict[int, discord.Member] = {}
to_query: List[int] = []
for user_id in user_ids:
member = guild.get_member(user_id)
if member is not None:
members[user_id] = member
elif not guild.chunked:
to_query.append(user_id)
# If guild isn't chunked, we might possibly be missing the member from cache,
# so we need to make sure that isn't the case by querying the user IDs for such guilds.
while to_query:
queried_members = await guild.query_members(user_ids=to_query[:100], limit=100)
members.update((member.id, member) for member in queried_members)
to_query = to_query[100:]
# Call `ban_user()` method for all users that turned out to be guild members.
# iterate over (user_id, member) pairs so the ID is well-defined in this
# scope, and keep the failure message out of `reason` so later bans still
# use the original reason
for user_id, member in members.items():
try:
success, failure_reason = await self.ban_user(
user=member, ctx=ctx, days=days, reason=reason, create_modlog_case=True
)
if success:
banned.append(user_id)
else:
errors[user_id] = _("Failed to ban user {user_id}: {reason}").format(
user_id=user_id, reason=failure_reason
)
except Exception as e:
errors[user_id] = _("Failed to ban user {user_id}: {reason}").format(
user_id=user_id, reason=e
)
user_ids = remove_processed(user_ids)
if not user_ids:
await show_results()
return
for user_id in user_ids:
user = discord.Object(id=user_id)
audit_reason = get_audit_reason(author, reason)
queue_entry = (guild.id, user_id)
async with self.config.guild(guild).current_tempbans() as tempbans:
if user_id in tempbans:
tempbans.remove(user_id)
upgrades.append(str(user_id))
log.info(
"{}({}) upgraded the tempban for {} to a permaban.".format(
author.name, author.id, user_id
)
)
banned.append(user_id)
else:
try:
await guild.ban(user, reason=audit_reason, delete_message_days=days)
log.info("{}({}) hackbanned {}".format(author.name, author.id, user_id))
except discord.NotFound:
errors[user_id] = _("User with ID {user_id} not found").format(
user_id=user_id
)
continue
except discord.Forbidden:
errors[user_id] = _(
"Could not ban user with ID {user_id}: missing permissions."
).format(user_id=user_id)
continue
else:
banned.append(user_id)
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"hackban",
user_id,
author,
reason,
until=None,
channel=None,
)
await show_results()
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def tempban(
self,
ctx: commands.Context,
user: discord.Member,
duration: Optional[commands.TimedeltaConverter] = None,
days: Optional[int] = None,
*,
reason: str = None,
):
"""Temporarily ban a user from this server."""
guild = ctx.guild
author = ctx.author
if author == user:
await ctx.send(
_("I cannot let you do that. Self-harm is bad {}").format("\N{PENSIVE FACE}")
)
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
elif guild.me.top_role <= user.top_role or user == guild.owner:
await ctx.send(_("I cannot do that due to Discord hierarchy rules."))
return
if duration is None:
duration = timedelta(seconds=await self.config.guild(guild).default_tempban_duration())
unban_time = datetime.now(timezone.utc) + duration
if days is None:
days = await self.config.guild(guild).default_days()
if not (0 <= days <= 7):
await ctx.send(_("Invalid days. Must be between 0 and 7."))
return
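        # Request an invite that stays valid for a day longer than the ban,
        # so the link still works when the tempban expires.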
invite = await self.get_invite_for_reinvite(ctx, int(duration.total_seconds() + 86400))
if invite is None:
invite = ""
queue_entry = (guild.id, user.id)
await self.config.member(user).banned_until.set(unban_time.timestamp())
async with self.config.guild(guild).current_tempbans() as current_tempbans:
current_tempbans.append(user.id)
with contextlib.suppress(discord.HTTPException):
# We don't want blocked DMs preventing us from banning
msg = _("You have been temporarily banned from {server_name} until {date}.").format(
server_name=guild.name, date=unban_time.strftime("%m-%d-%Y %H:%M:%S")
)
if invite:
msg += _(" Here is an invite for when your ban expires: {invite_link}").format(
invite_link=invite
)
await user.send(msg)
try:
await guild.ban(user, reason=reason, delete_message_days=days)
except discord.Forbidden:
await ctx.send(_("I can't do that for some reason."))
except discord.HTTPException:
await ctx.send(_("Something went wrong while banning."))
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"tempban",
user,
author,
reason,
unban_time,
)
await ctx.send(_("Done. Enough chaos for now."))
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def softban(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Kick a user and delete 1 day's worth of their messages."""
guild = ctx.guild
author = ctx.author
if author == user:
await ctx.send(
_("I cannot let you do that. Self-harm is bad {emoji}").format(
emoji="\N{PENSIVE FACE}"
)
)
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
audit_reason = get_audit_reason(author, reason)
invite = await self.get_invite_for_reinvite(ctx)
if invite is None:
invite = ""
queue_entry = (guild.id, user.id)
try: # We don't want blocked DMs preventing us from banning
msg = await user.send(
_(
"You have been banned and "
"then unbanned as a quick way to delete your messages.\n"
"You can now join the server again. {invite_link}"
).format(invite_link=invite)
)
except discord.HTTPException:
msg = None
try:
await guild.ban(user, reason=audit_reason, delete_message_days=1)
except discord.errors.Forbidden:
await ctx.send(_("My role is not high enough to softban that user."))
if msg is not None:
await msg.delete()
return
        except discord.HTTPException:
log.exception(
"{}({}) attempted to softban {}({}), but an error occurred trying to ban them.".format(
author.name, author.id, user.name, user.id
)
)
return
try:
await guild.unban(user)
        except discord.HTTPException:
log.exception(
"{}({}) attempted to softban {}({}), but an error occurred trying to unban them.".format(
author.name, author.id, user.name, user.id
)
)
return
else:
log.info(
"{}({}) softbanned {}({}), deleting 1 day worth "
"of messages.".format(author.name, author.id, user.name, user.id)
)
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"softban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("Done. Enough chaos."))
@commands.command()
@commands.guild_only()
@commands.mod_or_permissions(move_members=True)
async def voicekick(
self, ctx: commands.Context, member: discord.Member, *, reason: str = None
):
"""Kick a member from a voice channel."""
author = ctx.author
guild = ctx.guild
user_voice_state: discord.VoiceState = member.voice
if await self._voice_perm_check(ctx, user_voice_state, move_members=True) is False:
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, member):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
case_channel = member.voice.channel
        # Remember the channel the member was in, to attach to the modlog case.
try:
await member.move_to(None)
except discord.Forbidden: # Very unlikely that this will ever occur
await ctx.send(_("I am unable to kick this member from the voice channel."))
return
except discord.HTTPException:
await ctx.send(_("Something went wrong while attempting to kick that member."))
return
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"vkick",
member,
author,
reason,
until=None,
channel=case_channel,
)
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(mute_members=True, deafen_members=True)
async def voiceunban(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Unban a user from speaking and listening in the server's voice channels."""
user_voice_state = user.voice
if (
await self._voice_perm_check(
ctx, user_voice_state, deafen_members=True, mute_members=True
)
is False
):
return
        needs_unmute = user_voice_state.mute
        needs_undeafen = user_voice_state.deaf
audit_reason = get_audit_reason(ctx.author, reason)
if needs_unmute and needs_undeafen:
await user.edit(mute=False, deafen=False, reason=audit_reason)
elif needs_unmute:
await user.edit(mute=False, reason=audit_reason)
elif needs_undeafen:
await user.edit(deafen=False, reason=audit_reason)
else:
await ctx.send(_("That user isn't muted or deafened by the server."))
return
guild = ctx.guild
author = ctx.author
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"voiceunban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("User is now allowed to speak and listen in voice channels."))
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(mute_members=True, deafen_members=True)
async def voiceban(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Ban a user from speaking and listening in the server's voice channels."""
user_voice_state: discord.VoiceState = user.voice
if (
await self._voice_perm_check(
ctx, user_voice_state, deafen_members=True, mute_members=True
)
is False
):
return
        needs_mute = not user_voice_state.mute
        needs_deafen = not user_voice_state.deaf
audit_reason = get_audit_reason(ctx.author, reason)
author = ctx.author
guild = ctx.guild
if needs_mute and needs_deafen:
await user.edit(mute=True, deafen=True, reason=audit_reason)
elif needs_mute:
await user.edit(mute=True, reason=audit_reason)
elif needs_deafen:
await user.edit(deafen=True, reason=audit_reason)
else:
await ctx.send(_("That user is already muted and deafened server-wide."))
return
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"voiceban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("User has been banned from speaking or listening in voice channels."))
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def unban(self, ctx: commands.Context, user_id: RawUserIds, *, reason: str = None):
"""Unban a user from this server.
Requires specifying the target user's ID. To find this, you may either:
1. Copy it from the mod log case (if one was created), or
        2. Enable developer mode, go to Bans in this server's settings, right-
           click the user and select 'Copy ID'."""
guild = ctx.guild
author = ctx.author
audit_reason = get_audit_reason(ctx.author, reason)
bans = await guild.bans()
bans = [be.user for be in bans]
user = discord.utils.get(bans, id=user_id)
if not user:
await ctx.send(_("It seems that user isn't banned!"))
return
queue_entry = (guild.id, user_id)
try:
await guild.unban(user, reason=audit_reason)
except discord.HTTPException:
await ctx.send(_("Something went wrong while attempting to unban that user."))
return
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"unban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("Unbanned that user from this server."))
if await self.config.guild(guild).reinvite_on_unban():
user = ctx.bot.get_user(user_id)
if not user:
await ctx.send(
_("I don't share another server with this user. I can't reinvite them.")
)
return
invite = await self.get_invite_for_reinvite(ctx)
if invite:
try:
await user.send(
_(
"You've been unbanned from {server}.\n"
"Here is an invite for that server: {invite_link}"
).format(server=guild.name, invite_link=invite.url)
)
except discord.Forbidden:
await ctx.send(
_(
"I failed to send an invite to that user. "
"Perhaps you may be able to send it for me?\n"
"Here's the invite link: {invite_link}"
).format(invite_link=invite.url)
)
except discord.HTTPException:
await ctx.send(
_(
"Something went wrong when attempting to send that user"
"an invite. Here's the link so you can try: {invite_link}"
).format(invite_link=invite.url)
)
"""
JupyterHub Spawner to spawn user notebooks on a Kubernetes cluster.
This module exports `KubeSpawner` class, which is the actual spawner
implementation that should be used by JupyterHub.
"""
from functools import partial # noqa
from datetime import datetime
import json
import os
import sys
import string
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import warnings
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.concurrent import run_on_executor
from tornado import web
from traitlets import (
Bool,
Dict,
Integer,
List,
Unicode,
Union,
default,
observe,
validate,
)
from jupyterhub.spawner import Spawner
from jupyterhub.utils import exponential_backoff
from jupyterhub.traitlets import Command
from kubernetes.client.rest import ApiException
from kubernetes import client
import escapism
from jinja2 import Environment, BaseLoader
from .clients import shared_client
from kubespawner.traitlets import Callable
from kubespawner.objects import make_pod, make_pvc
from kubespawner.reflector import NamespacedResourceReflector
from asyncio import sleep
from async_generator import async_generator, yield_
from slugify import slugify
class PodReflector(NamespacedResourceReflector):
"""
PodReflector is merely a configured NamespacedResourceReflector. It exposes
    the pods property, which simply maps to self.resources, where the
NamespacedResourceReflector keeps an updated list of the resource defined by
the `kind` field and the `list_method_name` field.
"""
kind = 'pods'
list_method_name = 'list_namespaced_pod'
# FUTURE: These labels are the selection labels for the PodReflector. We
# might want to support multiple deployments in the same namespace, so we
# would need to select based on additional labels such as `app` and
# `release`.
labels = {
'component': 'singleuser-server',
}
@property
def pods(self):
"""
A dictionary of the python kubernetes client's representation of pods
for the namespace. The dictionary keys are the pod ids and the values
are the actual pod resource representations.
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#pod-v1-core
"""
return self.resources
class EventReflector(NamespacedResourceReflector):
"""
EventsReflector is merely a configured NamespacedResourceReflector. It
    exposes the events property, which simply maps to self.resources, where
the NamespacedResourceReflector keeps an updated list of the resource
defined by the `kind` field and the `list_method_name` field.
"""
kind = 'events'
list_method_name = 'list_namespaced_event'
@property
def events(self):
"""
Returns list of the python kubernetes client's representation of k8s
events within the namespace, sorted by the latest event.
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#event-v1-core
"""
# NOTE:
# - self.resources is a dictionary with keys mapping unique ids of
# Kubernetes Event resources, updated by NamespacedResourceReflector.
        #   self.resources builds up with incoming k8s events, but can also
        #   suddenly refresh itself entirely. We should not assume a call to
# this dictionary's values will result in a consistently ordered list,
# so we sort it to get it somewhat more structured.
        # - We seem to get either event.last_timestamp or event.event_time; both
        #   fields serve the same role, but the former is a low-resolution
        #   timestamp and the latter is a higher-resolution one.
return sorted(
self.resources.values(),
key=lambda event: event.last_timestamp or event.event_time,
)
class MockObject(object):
pass
class KubeSpawner(Spawner):
"""
A JupyterHub spawner that spawn pods in a Kubernetes Cluster. Each server
spawned by a user will have its own KubeSpawner instance.
"""
# We want to have one single threadpool executor that is shared across all
# KubeSpawner instances, so we apply a Singleton pattern. We initialize this
# class variable from the first KubeSpawner instance that is created and
# then reference it from all instances. The same goes for the PodReflector
# and EventReflector.
executor = None
reflectors = {
"pods": None,
"events": None,
}
@property
def pod_reflector(self):
"""
        A convenience alias to the class variable reflectors['pods'].
"""
return self.__class__.reflectors['pods']
@property
def event_reflector(self):
"""
        A convenience alias to the class variable reflectors['events'] if the
spawner instance has events_enabled.
"""
if self.events_enabled:
return self.__class__.reflectors['events']
def __init__(self, *args, **kwargs):
_mock = kwargs.pop('_mock', False)
super().__init__(*args, **kwargs)
if _mock:
# runs during test execution only
if 'user' not in kwargs:
user = MockObject()
user.name = 'mock_name'
user.id = 'mock_id'
user.url = 'mock_url'
self.user = user
if 'hub' not in kwargs:
hub = MockObject()
hub.public_host = 'mock_public_host'
hub.url = 'mock_url'
hub.base_url = 'mock_base_url'
hub.api_url = 'mock_api_url'
self.hub = hub
else:
# runs during normal execution only
# By now, all the traitlets have been set, so we can use them to compute
# other attributes
if self.__class__.executor is None:
self.__class__.executor = ThreadPoolExecutor(
max_workers=self.k8s_api_threadpool_workers
)
# This will start watching in __init__, so it'll start the first
# time any spawner object is created. Not ideal but works!
self._start_watching_pods()
if self.events_enabled:
self._start_watching_events()
self.api = shared_client('CoreV1Api')
# runs during both test and normal execution
self.pod_name = self._expand_user_properties(self.pod_name_template)
self.pvc_name = self._expand_user_properties(self.pvc_name_template)
if self.working_dir:
self.working_dir = self._expand_user_properties(self.working_dir)
if self.port == 0:
# Our default port is 8888
self.port = 8888
k8s_api_threadpool_workers = Integer(
# Set this explicitly, since this is the default in Python 3.5+
# but not in 3.4
5 * multiprocessing.cpu_count(),
config=True,
help="""
Number of threads in thread pool used to talk to the k8s API.
Increase this if you are dealing with a very large number of users.
Defaults to `5 * cpu_cores`, which is the default for `ThreadPoolExecutor`.
"""
)
events_enabled = Bool(
True,
config=True,
help="""
Enable event-watching for progress-reports to the user spawn page.
Disable if these events are not desirable
or to save some performance cost.
"""
)
namespace = Unicode(
config=True,
help="""
Kubernetes namespace to spawn user pods in.
If running inside a kubernetes cluster with service accounts enabled,
defaults to the current namespace. If not, defaults to `default`
"""
)
@default('namespace')
def _namespace_default(self):
"""
Set namespace default to current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
`default`
"""
ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return 'default'
ip = Unicode(
'0.0.0.0',
config=True,
help="""
The IP address (or hostname) the single-user server should listen on.
We override this from the parent so we can set a more sane default for
the Kubernetes setup.
"""
)
cmd = Command(
None,
allow_none=True,
minlen=0,
config=True,
help="""
The command used for starting the single-user server.
Provide either a string or a list containing the path to the startup script command. Extra arguments,
other than this path, should be provided via `args`.
This is usually set if you want to start the single-user server in a different python
environment (with virtualenv/conda) than JupyterHub itself.
Some spawners allow shell-style expansion here, allowing you to use environment variables.
Most, including the default, do not. Consult the documentation for your spawner to verify!
If set to `None`, Kubernetes will start the `CMD` that is specified in the Docker image being started.
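        A hedged example (``jupyterhub-singleuser`` is the stock JupyterHub single-user entrypoint; substitute your own startup command)::
            c.KubeSpawner.cmd = ['jupyterhub-singleuser']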
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
working_dir = Unicode(
None,
allow_none=True,
config=True,
help="""
The working directory where the Notebook server will be started inside the container.
Defaults to `None` so the working directory will be the one defined in the Dockerfile.
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
service_account = Unicode(
None,
allow_none=True,
config=True,
help="""
The service account to be mounted in the spawned user pod.
When set to `None` (the default), no service account is mounted, and the default service account
is explicitly disabled.
This `serviceaccount` must already exist in the namespace the user pod is being spawned in.
WARNING: Be careful with this configuration! Make sure the service account being mounted
has the minimal permissions needed, and nothing more. When misconfigured, this can easily
give arbitrary users root over your entire cluster.
"""
)
pod_name_template = Unicode(
'jupyter-{username}{servername}',
config=True,
help="""
Template to use to form the name of user's pods.
`{username}` is expanded to the escaped, dns-label safe username.
This must be unique within the namespace the pods are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
"""
)
storage_pvc_ensure = Bool(
False,
config=True,
help="""
Ensure that a PVC exists for each user before spawning.
Set to true to create a PVC named with `pvc_name_template` if it does
not exist for the user when their pod is spawning.
"""
)
pvc_name_template = Unicode(
'claim-{username}{servername}',
config=True,
help="""
Template to use to form the name of user's pvc.
`{username}` is expanded to the escaped, dns-label safe username.
This must be unique within the namespace the pvc are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
hub_connect_ip = Unicode(
allow_none=True,
config=True,
help="""DEPRECATED. Use c.JupyterHub.hub_connect_ip"""
)
hub_connect_port = Integer(
config=True,
help="""DEPRECATED. Use c.JupyterHub.hub_connect_url"""
)
@observe('hub_connect_ip', 'hub_connect_port')
def _deprecated_changed(self, change):
warnings.warn("""
KubeSpawner.{0} is deprecated with JupyterHub >= 0.8.
Use JupyterHub.{0}
""".format(change.name),
DeprecationWarning)
setattr(self.hub, change.name.split('_', 1)[1], change.new)
common_labels = Dict(
{
'app': 'jupyterhub',
'heritage': 'jupyterhub',
},
config=True,
help="""
Kubernetes labels that both spawned singleuser server pods and created
user PVCs will get.
Note that these are only set when the Pods and PVCs are created, not
later when this setting is updated.
"""
)
extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the spawned single-user pods.
The keys and values specified here would be set as labels on the spawned single-user
kubernetes pods. The keys and values must both be strings that match the kubernetes
label key / value constraints.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
extra_annotations = Dict(
config=True,
help="""
Extra Kubernetes annotations to set on the spawned single-user pods.
The keys and values specified here are added as annotations on the spawned single-user
kubernetes pods. The keys and values must both be strings.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__
for more info on what annotations are and why you might want to use them!
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
image = Unicode(
'jupyterhub/singleuser:latest',
config=True,
help="""
Docker image to use for spawning user's containers.
Defaults to `jupyterhub/singleuser:latest`
Name of the container + a tag, same as would be used with
a `docker pull` command. If tag is set to `latest`, kubernetes will
check the registry each time a new user is spawned to see if there
is a newer image available. If available, new image will be pulled.
Note that this could cause long delays when spawning, especially
if the image is large. If you do not specify a tag, whatever version
of the image is first pulled on the node will be used, thus possibly
leading to inconsistent images on different nodes. For all these
reasons, it is recommended to specify a specific immutable tag
for the image.
If your image is very large, you might need to increase the timeout
for starting the single user container from the default. You can
set this with::
c.KubeSpawner.start_timeout = 60 * 5 # Up to 5 minutes
"""
)
image_pull_policy = Unicode(
'IfNotPresent',
config=True,
help="""
The image pull policy of the docker container specified in
`image`.
Defaults to `IfNotPresent` which causes the Kubelet to NOT pull the image
specified in KubeSpawner.image if it already exists, except if the tag
is `:latest`. For more information on image pull policy,
refer to `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/images/>`__.
This configuration is primarily used in development if you are
actively changing the `image_spec` and would like to pull the image
whenever a user container is spawned.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
image_pull_secrets = Unicode(
None,
allow_none=True,
config=True,
help="""
The kubernetes secret to use for pulling images from private repository.
Set this to the name of a Kubernetes secret containing the docker configuration
required to pull the image.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod>`__
for more information on when and why this might need to be set, and what
it should be set to.
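        For example (the secret name is illustrative only)::
            c.KubeSpawner.image_pull_secrets = 'myregistry-secret'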
"""
)
node_selector = Dict(
config=True,
help="""
        Dictionary of selector labels used to match the nodes where pods will be launched.
        The default is None, which means pods may be launched on any available node.
        For example, to match the nodes that have a label of `disktype: ssd`, use::
c.KubeSpawner.node_selector = {'disktype': 'ssd'}
"""
)
uid = Union(
trait_types=[
Integer(),
Callable(),
],
allow_none=True,
config=True,
help="""
The UID to run the single-user server containers as.
This UID should ideally map to a user that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the user specified with the `USER` directive in the
container metadata is used.
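        A minimal sketch of the callable form (the ``site_user_db`` lookup is hypothetical)::
            def get_uid(spawner):
                # e.g. resolve the UID from a site-specific user database
                return site_user_db[spawner.user.name]['uid']
            c.KubeSpawner.uid = get_uid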
"""
)
gid = Union(
trait_types=[
Integer(),
Callable(),
],
allow_none=True,
config=True,
help="""
The GID to run the single-user server containers as.
This GID should ideally map to a group that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the group of the user specified with the `USER` directive
in the container metadata is used.
"""
)
fs_gid = Union(
trait_types=[
Integer(),
Callable(),
],
allow_none=True,
config=True,
help="""
The GID of the group that should own any volumes that are created & mounted.
A special supplemental group that applies primarily to the volumes mounted
in the single-user server. In volumes from supported providers, the following
things happen:
        1. The owning GID will be this GID
        2. The setgid bit is set (new files created in the volume will be owned by
           this GID)
        3. The permission bits are OR'd with rw-rw----
The single-user server will also be run with this gid as part of its supplemental
groups.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable will
be called asynchronously if it returns a future, rather than an int. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
You'll *have* to set this if you are using auto-provisioned volumes with most
cloud providers. See `fsGroup <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podsecuritycontext-v1-core>`_
for more details.
"""
)
supplemental_gids = Union(
trait_types=[
List(),
Callable(),
],
config=True,
help="""
A list of GIDs that should be set as additional supplemental groups to the
user that the container runs as.
Instead of a list of integers, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of integers. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
You may have to set this if you are deploying to an environment with RBAC/SCC
enforced and pods run with a 'restricted' SCC which results in the image being
run as an assigned user ID. The supplemental group IDs would need to include
the corresponding group ID of the user ID the image normally would run as. The
image must setup all directories/files any application needs access to, as group
writable.
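        For example (the group ID is illustrative only)::
            c.KubeSpawner.supplemental_gids = [100]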
"""
)
privileged = Bool(
False,
config=True,
help="""
Whether to run the pod with a privileged security context.
"""
)
modify_pod_hook = Callable(
None,
allow_none=True,
config=True,
help="""
Callable to augment the Pod object before launching.
Expects a callable that takes two parameters:
1. The spawner object that is doing the spawning
2. The Pod object that is to be launched
You should modify the Pod object and return it.
This can be a coroutine if necessary. When set to none, no augmenting is done.
This is very useful if you want to modify the pod being launched dynamically.
Note that the spawner object can change between versions of KubeSpawner and JupyterHub,
so be careful relying on this!
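        A minimal sketch of such a hook (the annotation key is illustrative)::
            async def add_owner_annotation(spawner, pod):
                # tag the pod with the name of the user it was spawned for
                pod.metadata.annotations['example.org/owner'] = spawner.user.name
                return pod
            c.KubeSpawner.modify_pod_hook = add_owner_annotation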
"""
)
volumes = List(
config=True,
help="""
List of Kubernetes Volume specifications that will be mounted in the user pod.
This list will be directly added under `volumes` in the kubernetes pod spec,
so you should use the same structure. Each item in the list must have the
following two keys:
- `name`
Name that'll be later used in the `volume_mounts` config to mount this
volume at a specific path.
- `<name-of-a-supported-volume-type>` (such as `hostPath`, `persistentVolumeClaim`,
etc)
The key name determines the type of volume to mount, and the value should
be an object specifying the various options available for that kind of
volume.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on the various kinds of volumes available and their options.
Your kubernetes cluster must already be configured to support the volume types you want to use.
`{username}` is expanded to the escaped, dns-label safe username.
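        A minimal sketch (the volume name and claim name are illustrative)::
            c.KubeSpawner.volumes = [{
                'name': 'user-home',
                'persistentVolumeClaim': {'claimName': 'claim-{username}'}
            }]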
"""
)
volume_mounts = List(
config=True,
help="""
List of paths on which to mount volumes in the user notebook's pod.
This list will be added to the values of the `volumeMounts` key under the user's
container in the kubernetes pod spec, so you should use the same structure as that.
Each item in the list should be a dictionary with at least these two keys:
- `mountPath` The path on the container in which we want to mount the volume.
- `name` The name of the volume we want to mount, as specified in the `volumes` config.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on how the `volumeMount` item works.
`{username}` is expanded to the escaped, dns-label safe username.
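        A matching sketch for the volume sketched above (the mount path is illustrative)::
            c.KubeSpawner.volume_mounts = [{
                'name': 'user-home',
                'mountPath': '/home/jovyan'
            }]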
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_capacity = Unicode(
None,
config=True,
allow_none=True,
help="""
The amount of storage space to request from the volume that the pvc will
mount to. This amount will be the amount of storage space the user has
to work with on their notebook. If left blank, the kubespawner will not
create a pvc for the pod.
This will be added to the `resources: requests: storage:` in the k8s pod spec.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims>`__
for more information on how storage works.
Quantities can be represented externally as unadorned integers, or as fixed-point
integers with one of these SI suffices (`E, P, T, G, M, K, m`) or their power-of-two
equivalents (`Ei, Pi, Ti, Gi, Mi, Ki`). For example, the following represent roughly
the same value: `128974848`, `129e6`, `129M`, `123Mi`.
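        For example, to request ten gibibytes per user (a purely illustrative value)::
            c.KubeSpawner.storage_capacity = '10Gi'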
"""
)
storage_extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the user PVCs.
The keys and values specified here would be set as labels on the PVCs
created by kubespawner for the user. Note that these are only set
when the PVC is created, not later when this setting is updated.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_class = Unicode(
None,
config=True,
allow_none=True,
help="""
The storage class that the pvc will use.
This will be added to the `annotations: volume.beta.kubernetes.io/storage-class:`
in the pvc metadata.
This will determine what type of volume the pvc will request to use. If one exists
that matches the criteria of the StorageClass, the pvc will mount to that. Otherwise,
b/c it has a storage class, k8s will dynamically spawn a pv for the pvc to bind to
and a machine in the cluster for the pv to bind to.
Note that an empty string is a valid value and is always interpreted to be
requesting a pv with no class.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/storage-classes/>`__
for more information on how StorageClasses work.
"""
)
storage_access_modes = List(
["ReadWriteOnce"],
config=True,
help="""
List of access modes the user has for the pvc.
The access modes are:
- `ReadWriteOnce` – the volume can be mounted as read-write by a single node
- `ReadOnlyMany` – the volume can be mounted read-only by many nodes
- `ReadWriteMany` – the volume can be mounted as read-write by many nodes
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes>`__
for more information on how access modes work.
"""
)
storage_selector = Dict(
config=True,
help="""
        Dictionary of selector labels used to match a PersistentVolumeClaim to
        a PersistentVolume.
        The default is None, which means matching is based only on the other storage criteria.
        For example, to match volumes that have a label of `content: jupyter`, use::
c.KubeSpawner.storage_selector = {'matchLabels':{'content': 'jupyter'}}
"""
)
lifecycle_hooks = Dict(
config=True,
help="""
Kubernetes lifecycle hooks to set on the spawned single-user pods.
        The keys are the hook names; Kubernetes supports only two hooks, postStart and preStop.
        The values are the hook handlers, which the Kubernetes management system executes when the hook is called.
Below is an sample copied from
`the Kubernetes documentation <https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/>`__::
c.KubeSpawner.lifecycle_hooks = {
"postStart": {
"exec": {
"command": ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
}
},
"preStop": {
"exec": {
"command": ["/usr/sbin/nginx", "-s", "quit"]
}
}
}
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/>`__
for more info on what lifecycle hooks are and why you might want to use them!
"""
)
init_containers = List(
config=True,
help="""
List of initialization containers belonging to the pod.
This list will be directly added under `initContainers` in the kubernetes pod spec,
        so you should use the same structure. Each item in the list must be a valid
        `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#container-v1-core>`_.
        One use is disabling access to the metadata service from the single-user
        notebook server with the configuration below::
c.KubeSpawner.init_containers = [{
"name": "init-iptables",
"image": "<image with iptables installed>",
"command": ["iptables", "-A", "OUTPUT", "-p", "tcp", "--dport", "80", "-d", "169.254.169.254", "-j", "DROP"],
"securityContext": {
"capabilities": {
"add": ["NET_ADMIN"]
}
}
}]
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/workloads/pods/init-containers/>`__
for more info on what init containers are and why you might want to use them!
        To use this feature, the Kubernetes version must be greater than 1.6.
"""
)
extra_container_config = Dict(
config=True,
help="""
Extra configuration (e.g. ``envFrom``) for notebook container which is not covered by other attributes.
        This dict will be directly merged into the `container` of the notebook server,
        so you should use the same structure. Each key in the dict must be a field
        of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#container-v1-core>`_.
        One use is setting ``envFrom`` on the notebook container with the configuration below::
c.KubeSpawner.extra_container_config = {
"envFrom": [{
"configMapRef": {
"name": "special-config"
}
}]
}
The key could be either a camelCase word (used by Kubernetes yaml, e.g.
``envFrom``) or a snake_case word (used by Kubernetes Python client,
e.g. ``env_from``).
"""
)
extra_pod_config = Dict(
config=True,
help="""
Extra configuration for the pod which is not covered by other attributes.
        This dict will be directly merged into the pod spec, so you should use the same structure.
        Each key in the dict is a field of the pod configuration,
        which follows the spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core.
        One use is setting `restartPolicy` and `dnsPolicy` with the configuration below::
c.KubeSpawner.extra_pod_config = {
"restartPolicy": "OnFailure",
"dns_policy": "ClusterFirstWithHostNet"
}
The `key` could be either a camelCase word (used by Kubernetes yaml,
e.g. `restartPolicy`) or a snake_case word (used by Kubernetes Python
client, e.g. `dns_policy`).
"""
)
extra_containers = List(
config=True,
help="""
        List of containers belonging to the pod, in addition to the container generated for the notebook server.
        This list will be directly appended under `containers` in the kubernetes pod spec,
        so you should use the same structure. Each item in the list is a container configuration
        which follows the spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#container-v1-core.
        One use is running a crontab in a sidecar container to clean sensitive data, with the configuration below::
c.KubeSpawner.extra_containers = [{
"name": "crontab",
"image": "supercronic",
"command": ["/usr/local/bin/supercronic", "/etc/crontab"]
}]
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
scheduler_name = Unicode(
None,
allow_none=True,
config=True,
help="""
Set the pod's scheduler explicitly by name. See `the Kubernetes documentation <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core>`__
for more information.
"""
)
tolerations = List(
config=True,
help="""
List of tolerations that are to be assigned to the pod in order to be able to schedule the pod
on a node with the corresponding taints. See the official Kubernetes documentation for additional details
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Pass this field an array of `"Toleration" objects
<https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#nodeselectorterm-v1-core>`__
Example::
[
{
'key': 'key',
'operator': 'Equal',
'value': 'value',
'effect': 'NoSchedule'
},
{
'key': 'key',
'operator': 'Exists',
'effect': 'NoSchedule'
}
]
"""
)
node_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PreferredSchedulingTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#preferredschedulingterm-v1-core
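        A hedged sketch (the label key and values are hypothetical)::
            c.KubeSpawner.node_affinity_preferred = [{
                'weight': 1,
                'preference': {
                    'matchExpressions': [{
                        'key': 'hub.jupyter.org/node-purpose',
                        'operator': 'In',
                        'values': ['user'],
                    }],
                },
            }]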
"""
)
node_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "NodeSelectorTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#nodeselectorterm-v1-core
"""
)
pod_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#weightedpodaffinityterm-v1-core
"""
)
pod_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podaffinityterm-v1-core
"""
)
pod_anti_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#weightedpodaffinityterm-v1-core
"""
)
pod_anti_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podaffinityterm-v1-core
"""
)
extra_resource_guarantees = Dict(
config=True,
help="""
The dictionary used to request arbitrary resources.
Default is None and means no additional resources are requested.
        For example, to request 1 Nvidia GPU::
c.KubeSpawner.extra_resource_guarantees = {"nvidia.com/gpu": "1"}
"""
)
extra_resource_limits = Dict(
config=True,
help="""
The dictionary used to limit arbitrary resources.
Default is None and means no additional resources are limited.
For example, to add a limit of 3 Nvidia GPUs::
c.KubeSpawner.extra_resource_limits = {"nvidia.com/gpu": "3"}
"""
)
delete_stopped_pods = Bool(
True,
config=True,
help="""
Whether to delete pods that have stopped themselves.
Set to False to leave stopped pods in the completed state,
allowing for easier debugging of why they may have stopped.
"""
)
profile_form_template = Unicode(
"""
<script>
        // JupyterHub 0.8 applied form-control indiscriminately to all form elements.
// Can be removed once we stop supporting JupyterHub 0.8
$(document).ready(function() {
$('#kubespawner-profiles-list input[type="radio"]').removeClass('form-control');
});
</script>
<style>
/* The profile description should not be bold, even though it is inside the <label> tag */
#kubespawner-profiles-list label p {
font-weight: normal;
}
</style>
<div class='form-group' id='kubespawner-profiles-list'>
{% for profile in profile_list %}
<label for='profile-item-{{ profile.slug }}' class='form-control input-group'>
<div class='col-md-1'>
<input type='radio' name='profile' id='profile-item-{{ profile.slug }}' value='{{ profile.slug }}' {% if profile.default %}checked{% endif %} />
</div>
<div class='col-md-11'>
<strong>{{ profile.display_name }}</strong>
{% if profile.description %}
<p>{{ profile.description }}</p>
{% endif %}
</div>
</label>
{% endfor %}
</div>
""",
config=True,
help="""
Jinja2 template for constructing profile list shown to user.
Used when `profile_list` is set.
The contents of `profile_list` are passed in to the template.
        This should be used to construct the contents of an HTML form. When
        posted, this form is expected to have an item named `profile` whose
        value is the slug of the selected profile from `profile_list`.
"""
)
profile_list = Union(
trait_types=[
List(trait=Dict()),
Callable()
],
config=True,
help="""
List of profiles to offer for selection by the user.
        Signature is: `List(Dict())`, where each item is a dictionary that has the following keys:
- `display_name`: the human readable display name (should be HTML safe)
- `slug`: the machine readable slug to identify the profile
(missing slugs are generated from display_name)
- `description`: Optional description of this profile displayed to the user.
- `kubespawner_override`: a dictionary with overrides to apply to the KubeSpawner
settings. Each value can be either the final value to change or a callable that
take the `KubeSpawner` instance as parameter and return the final value.
- `default`: (optional Bool) True if this is the default selected option
Example::
c.KubeSpawner.profile_list = [
{
'display_name': 'Training Env - Python',
'slug': 'training-python',
'default': True,
'kubespawner_override': {
'image': 'training/python:label',
'cpu_limit': 1,
'mem_limit': '512M',
}
}, {
'display_name': 'Training Env - Datascience',
'slug': 'training-datascience',
'kubespawner_override': {
'image': 'training/datascience:label',
'cpu_limit': 4,
'mem_limit': '8G',
}
}, {
'display_name': 'DataScience - Small instance',
'slug': 'datascience-small',
'kubespawner_override': {
'image': 'datascience/small:label',
'cpu_limit': 10,
'mem_limit': '16G',
}
}, {
'display_name': 'DataScience - Medium instance',
'slug': 'datascience-medium',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
}
}, {
'display_name': 'DataScience - Medium instance (GPUx2)',
'slug': 'datascience-gpu2x',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
'extra_resource_guarantees': {"nvidia.com/gpu": "2"},
}
}
]
Instead of a list of dictionaries, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of dictionaries. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
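        A hedged sketch of the callable form (the profile contents are illustrative)::
            async def dynamic_profiles(spawner):
                # could e.g. consult an external service keyed on spawner.user.name
                return [
                    {'display_name': 'Default', 'slug': 'default', 'default': True},
                ]
            c.KubeSpawner.profile_list = dynamic_profiles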
"""
)
priority_class_name = Unicode(
config=True,
help="""
The priority class that the pods will use.
See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption for
more information on how pod priority works.
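        For example (the class name is illustrative and must already exist in the cluster)::
            c.KubeSpawner.priority_class_name = 'jupyterhub-user-priority'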
"""
)
delete_grace_period = Integer(
1,
config=True,
help="""
        Time in seconds for the pod to be in `terminating` state before it is forcefully killed.
Increase this if you need more time to execute a `preStop` lifecycle hook.
See https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods for
more information on how pod termination works.
Defaults to `1`.
"""
)
# deprecate redundant and inconsistent singleuser_ and user_ prefixes:
_deprecated_traits_09 = [
"singleuser_working_dir",
"singleuser_service_account",
"singleuser_extra_labels",
"singleuser_extra_annotations",
"singleuser_image_spec",
"singleuser_image_pull_policy",
"singleuser_image_pull_secrets",
"singleuser_node_selector",
"singleuser_uid",
"singleuser_fs_gid",
"singleuser_supplemental_gids",
"singleuser_privileged",
"singleuser_lifecycle_hooks",
"singleuser_extra_pod_config",
"singleuser_init_containers",
"singleuser_extra_container_config",
"singleuser_extra_containers",
"user_storage_class",
"user_storage_pvc_ensure",
"user_storage_capacity",
"user_storage_extra_labels",
"user_storage_access_modes",
]
# other general deprecations:
_deprecated_traits = {
'image_spec': ('image', '0.10'),
}
# add the bulk deprecations from 0.9
for _deprecated_name in _deprecated_traits_09:
_new_name = _deprecated_name.split('_', 1)[1]
_deprecated_traits[_deprecated_name] = (_new_name, '0.9')
@validate('config')
def _handle_deprecated_config(self, proposal):
config = proposal.value
if 'KubeSpawner' not in config:
# nothing to check
return config
for _deprecated_name, (_new_name, version) in self._deprecated_traits.items():
# for any `singleuser_name` deprecate in favor of `name`
if _deprecated_name not in config.KubeSpawner:
# nothing to do
continue
# remove deprecated value from config
_deprecated_value = config.KubeSpawner.pop(_deprecated_name)
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s instead",
_deprecated_name,
version,
_new_name,
)
if _new_name in config.KubeSpawner:
# *both* config values found,
# ignore deprecated config and warn about the collision
_new_value = config.KubeSpawner[_new_name]
# ignore deprecated config in favor of non-deprecated config
self.log.warning(
"Ignoring deprecated config KubeSpawner.%s = %r "
" in favor of KubeSpawner.%s = %r",
_deprecated_name,
_deprecated_value,
_new_name,
_new_value,
)
else:
# move deprecated config to its new home
config.KubeSpawner[_new_name] = _deprecated_value
return config
# define properties for deprecated names
# so we can propagate their values to the new traits.
# most deprecations should be handled via config above,
# but in case these are set at runtime, e.g. by subclasses
# or hooks, hook this up.
# The signature-order of these is funny
# because the property methods are created with
# functools.partial(f, name) so name is passed as the first arg
# before self.
def _get_deprecated(name, new_name, version, self):
# warn about the deprecated name
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
name,
version,
new_name,
)
return getattr(self, new_name)
def _set_deprecated(name, new_name, version, self, value):
# warn about the deprecated name
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
name,
version,
new_name,
)
return setattr(self, new_name, value)
for _deprecated_name, (_new_name, _version) in _deprecated_traits.items():
exec(
"""{0} = property(
partial(_get_deprecated, '{0}', '{1}', '{2}'),
partial(_set_deprecated, '{0}', '{1}', '{2}'),
)
""".format(
_deprecated_name,
_new_name,
_version,
)
)
del _deprecated_name
def _expand_user_properties(self, template):
# Make sure username and servername match the restrictions for DNS labels
# Note: '-' is not in safe_chars, as it is being used as escape character
safe_chars = set(string.ascii_lowercase + string.digits)
# Set servername based on whether named-server initialised
if self.name:
# use two -- to ensure no collision possibilities
# are created by an ambiguous boundary between username and
# servername.
# -- cannot occur in a string where - is the escape char.
servername = '--{}'.format(self.name)
safe_servername = '--{}'.format(escapism.escape(self.name, safe=safe_chars, escape_char='-').lower())
else:
servername = ''
safe_servername = ''
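        # The legacy escaping below replaced every unsafe character with '-',
        # which is lossy; it remains available to templates via the
        # {legacy_escape_username} field.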
legacy_escaped_username = ''.join([s if s in safe_chars else '-' for s in self.user.name.lower()])
safe_username = escapism.escape(self.user.name, safe=safe_chars, escape_char='-').lower()
return template.format(
userid=self.user.id,
username=safe_username,
unescaped_username=self.user.name,
legacy_escape_username=legacy_escaped_username,
servername=safe_servername,
unescaped_servername=servername,
)
def _expand_all(self, src):
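        """Recursively expand user property templates in lists, dicts and strings."""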
if isinstance(src, list):
return [self._expand_all(i) for i in src]
elif isinstance(src, dict):
return {k: self._expand_all(v) for k, v in src.items()}
elif isinstance(src, str):
return self._expand_user_properties(src)
else:
return src
def _build_common_labels(self, extra_labels):
# Default set of labels, picked up from
# https://github.com/kubernetes/helm/blob/master/docs/chart_best_practices/labels.md
labels = {}
labels.update(extra_labels)
labels.update(self.common_labels)
return labels
def _build_pod_labels(self, extra_labels):
labels = self._build_common_labels(extra_labels)
labels.update({
'component': 'singleuser-server'
})
return labels
def _build_common_annotations(self, extra_annotations):
# Annotations don't need to be escaped
annotations = {
'hub.jupyter.org/username': self.user.name
}
if self.name:
annotations['hub.jupyter.org/servername'] = self.name
annotations.update(extra_annotations)
return annotations
@gen.coroutine
def get_pod_manifest(self):
"""
Make a pod manifest that will spawn current user's notebook pod.
"""
if callable(self.uid):
uid = yield gen.maybe_future(self.uid(self))
else:
uid = self.uid
if callable(self.gid):
gid = yield gen.maybe_future(self.gid(self))
else:
gid = self.gid
if callable(self.fs_gid):
fs_gid = yield gen.maybe_future(self.fs_gid(self))
else:
fs_gid = self.fs_gid
if callable(self.supplemental_gids):
supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self))
else:
supplemental_gids = self.supplemental_gids
if self.cmd:
real_cmd = self.cmd + self.get_args()
else:
real_cmd = None
labels = self._build_pod_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(self._expand_all(self.extra_annotations))
return make_pod(
name=self.pod_name,
cmd=real_cmd,
port=self.port,
image=self.image,
image_pull_policy=self.image_pull_policy,
image_pull_secret=self.image_pull_secrets,
node_selector=self.node_selector,
run_as_uid=uid,
run_as_gid=gid,
fs_gid=fs_gid,
supplemental_gids=supplemental_gids,
run_privileged=self.privileged,
env=self.get_env(),
volumes=self._expand_all(self.volumes),
volume_mounts=self._expand_all(self.volume_mounts),
working_dir=self.working_dir,
labels=labels,
annotations=annotations,
cpu_limit=self.cpu_limit,
cpu_guarantee=self.cpu_guarantee,
mem_limit=self.mem_limit,
mem_guarantee=self.mem_guarantee,
extra_resource_limits=self.extra_resource_limits,
extra_resource_guarantees=self.extra_resource_guarantees,
lifecycle_hooks=self.lifecycle_hooks,
init_containers=self._expand_all(self.init_containers),
service_account=self.service_account,
extra_container_config=self.extra_container_config,
extra_pod_config=self._expand_all(self.extra_pod_config),
extra_containers=self._expand_all(self.extra_containers),
scheduler_name=self.scheduler_name,
tolerations=self.tolerations,
node_affinity_preferred=self.node_affinity_preferred,
node_affinity_required=self.node_affinity_required,
pod_affinity_preferred=self.pod_affinity_preferred,
pod_affinity_required=self.pod_affinity_required,
pod_anti_affinity_preferred=self.pod_anti_affinity_preferred,
pod_anti_affinity_required=self.pod_anti_affinity_required,
priority_class_name=self.priority_class_name,
logger=self.log,
)
def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({
'component': 'singleuser-storage'
})
annotations = self._build_common_annotations({})
return make_pvc(
name=self.pvc_name,
storage_class=self.storage_class,
access_modes=self.storage_access_modes,
selector=self.storage_selector,
storage=self.storage_capacity,
labels=labels,
annotations=annotations
)
def is_pod_running(self, pod):
"""
Check if the given pod is running
        `pod` must be a kubernetes client object representing a Pod (e.g. ``V1Pod``).
"""
# FIXME: Validate if this is really the best way
is_running = (
pod is not None and
pod.status.phase == 'Running' and
pod.status.pod_ip is not None and
pod.metadata.deletion_timestamp is None and
all([cs.ready for cs in pod.status.container_statuses])
)
return is_running
def get_state(self):
"""
Save state required to reinstate this user's pod from scratch
We save the `pod_name`, even though we could easily compute it,
because JupyterHub requires you save *some* state! Otherwise
it assumes your server is dead. This works around that.
It's also useful for cases when the `pod_template` changes between
restarts - this keeps the old pods around.
"""
state = super().get_state()
state['pod_name'] = self.pod_name
return state
def get_env(self):
"""Return the environment dict to use for the Spawner.
See also: jupyterhub.Spawner.get_env
"""
env = super(KubeSpawner, self).get_env()
# deprecate image
env['JUPYTER_IMAGE_SPEC'] = self.image
env['JUPYTER_IMAGE'] = self.image
return env
def load_state(self, state):
"""
Load state from storage required to reinstate this user's pod
Since this runs after `__init__`, this will override the generated `pod_name`
if there's one we have saved in state. These are the same in most cases,
but if the `pod_template` has changed in between restarts, it will no longer
be the case. This allows us to continue serving from the old pods with
the old names.
"""
if 'pod_name' in state:
self.pod_name = state['pod_name']
@gen.coroutine
def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
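# A minimal usage sketch (illustrative, not part of KubeSpawner): callers
# must compare the result to None explicitly, because a clean exit code
# of 0 is falsy but still means "the pod has exited":
#
#     status = yield spawner.poll()
#     if status is None:
#         pass  # pod is still running (or starting up)
#     else:
#         pass  # pod exited with code `status` (1 if unknown)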
@run_on_executor
def asynchronize(self, method, *args, **kwargs):
return method(*args, **kwargs)
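# Usage sketch: `asynchronize` wraps a blocking kubernetes client call so
# it runs on the shared threadpool executor instead of blocking the event
# loop, e.g. (the same pattern used in _start below):
#
#     yield self.asynchronize(
#         self.api.read_namespaced_pod,
#         name=self.pod_name,
#         namespace=self.namespace,
#     )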
@property
def events(self):
"""Filter event-reflector to just this pods events
Returns list of all events that match our pod_name
since our ._last_event (if defined).
._last_event is set at the beginning of .start().
"""
if not self.event_reflector:
return []
events = []
for event in self.event_reflector.events:
if event.involved_object.name != self.pod_name:
# only consider events for my pod name
continue
if self._last_event and event.metadata.uid == self._last_event:
# saw last_event marker, ignore any previous events
# and only consider future events
# only include events *after* our _last_event marker
events = []
else:
events.append(event)
return events
@async_generator
async def progress(self):
"""
This function reports back the progress of spawning a pod until
self._start_future has fired.
It works with events parsed by the python kubernetes client; the
relevant specification of those events is here:
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#event-v1-core
"""
if not self.events_enabled:
return
self.log.debug('progress generator: %s', self.pod_name)
start_future = self._start_future
pod_id = None
progress = 0
next_event = 0
break_while_loop = False
while True:
# Make one final pass through the events after the
# start_future signal has fired.
if start_future.done():
break_while_loop = True
events = self.events
len_events = len(events)
if next_event < len_events:
# only show messages for the 'current' pod
# pod_id may change if a previous pod is being stopped
# before starting a new one
# use the uid of the latest event to identify 'current'
pod_id = events[-1].involved_object.uid
for i in range(next_event, len_events):
event = events[i]
# move the progress bar.
# Since we don't know how many events we will get,
# asymptotically approach 90% completion with each event.
# each event gets 33% closer to 90%:
# 30 50 63 72 78 82 84 86 87 88 88 89
progress += (90 - progress) / 3
# V1Event isn't serializable, and neither are the datetime
# objects within it, and we need what we pass back to be
# serializable so it can be sent back from JupyterHub to
# a browser wanting to display progress.
serializable_event = json.loads(
json.dumps(event.to_dict(), default=datetime.isoformat)
)
await yield_({
'progress': int(progress),
'raw_event': serializable_event,
'message': "%s [%s] %s" % (
event.last_timestamp or event.event_time,
event.type,
event.message,
)
})
next_event = len_events
if break_while_loop:
break
await sleep(1)
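# A standalone sketch verifying the asymptotic progress formula used
# above (each event moves a third of the remaining distance to 90%):
#
#     progress = 0
#     for _ in range(12):
#         progress += (90 - progress) / 3
#         print(int(progress), end=' ')
#     # -> 30 50 63 72 78 82 84 86 87 88 88 89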
def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs):
"""Start a shared reflector on the KubeSpawner class
key: key for the reflector (e.g. 'pod' or 'events')
ReflectorClass: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
If replace=False and the reflector for this key is already running,
do nothing.
If replace=True, a running reflector will be stopped
and a new one started (for recovering from possible errors).
"""
main_loop = IOLoop.current()
def on_reflector_failure():
self.log.critical(
"%s reflector failed, halting Hub.",
key.title(),
)
sys.exit(1)
previous_reflector = self.__class__.reflectors.get(key)
if replace or not previous_reflector:
self.__class__.reflectors[key] = ReflectorClass(
parent=self,
namespace=self.namespace,
on_failure=on_reflector_failure,
**kwargs,
)
if replace and previous_reflector:
# we replaced the reflector, stop the old one
previous_reflector.stop()
# return the current reflector
return self.__class__.reflectors[key]
def _start_watching_events(self, replace=False):
"""Start the events reflector
If replace=False and the event reflector is already running,
do nothing.
If replace=True, a running event reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector(
"events",
EventReflector,
fields={"involvedObject.kind": "Pod"},
replace=replace,
)
def _start_watching_pods(self, replace=False):
"""Start the pod reflector
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector("pods", PodReflector, replace=replace)
# record a future for the call to .start()
# so we can use it to terminate .progress()
def start(self):
"""Thin wrapper around self._start
so we can hold onto a reference for the Future
start returns, which we can use to terminate
.progress()
"""
self._start_future = self._start()
return self._start_future
_last_event = None
@gen.coroutine
def _start(self):
"""Start the user's pod"""
# load user options (including profile)
yield self.load_user_options()
# record the uid of the latest event so we don't include old
# events from previous pods in self.events
# (later events, such as deletion of a previously stale
# pod that is part of this spawn process, are still reported)
events = self.events
if events:
self._last_event = events[-1].metadata.uid
if self.storage_pvc_ensure:
# Try and create the pvc. If it succeeds we are good. If it
# returns a 409, indicating it already exists, we are good. If
# it returns a 403, indicating a potential quota issue, we need
# to check whether the pvc already exists before we decide to
# raise the error for quota being exceeded. This is because
# quota is checked before determining whether the PVC needed
# to be created.
pvc = self.get_pvc_manifest()
try:
yield self.asynchronize(
self.api.create_namespaced_persistent_volume_claim,
namespace=self.namespace,
body=pvc
)
except ApiException as e:
if e.status == 409:
self.log.info("PVC " + self.pvc_name + " already exists, so did not create new pvc.")
elif e.status == 403:
t, v, tb = sys.exc_info()
try:
yield self.asynchronize(
self.api.read_namespaced_persistent_volume_claim,
name=self.pvc_name,
namespace=self.namespace)
except ApiException as e:
raise v.with_traceback(tb)
self.log.info("PVC " + self.pvc_name + " already exists, possibly have reached quota though.")
else:
raise
# If we run into a 409 Conflict error, it means a pod with the
# same name already exists. We stop it, wait for it to stop, and
# try again. We try 4 times, and if it still fails we give up.
# FIXME: Have better / cleaner retry logic!
retry_times = 4
pod = yield self.get_pod_manifest()
if self.modify_pod_hook:
pod = yield gen.maybe_future(self.modify_pod_hook(self, pod))
for i in range(retry_times):
try:
yield self.asynchronize(
self.api.create_namespaced_pod,
self.namespace,
pod,
)
break
except ApiException as e:
if e.status != 409:
# We only want to handle 409 conflict errors
self.log.exception("Failed for %s", pod.to_str())
raise
self.log.info('Found existing pod %s, attempting to kill', self.pod_name)
# TODO: this should show up in events
yield self.stop(True)
self.log.info('Killed pod %s, will try starting singleuser pod again', self.pod_name)
else:
raise Exception(
'Cannot create user pod %s: it already exists and could not be deleted' % self.pod_name)
# we need a timeout here even though start itself has a timeout
# in order for this coroutine to finish at some point.
# using the same start_timeout here
# essentially ensures that this timeout should never propagate up
# because the handler will have stopped waiting after
# start_timeout, starting from a slightly earlier point.
try:
yield exponential_backoff(
lambda: self.is_pod_running(self.pod_reflector.pods.get(self.pod_name, None)),
'pod/%s did not start in %s seconds!' % (self.pod_name, self.start_timeout),
timeout=self.start_timeout,
)
except TimeoutError:
if self.pod_name not in self.pod_reflector.pods:
# if pod never showed up at all,
# restart the pod reflector which may have become disconnected.
self.log.error(
"Pod %s never showed up in reflector, restarting pod reflector",
self.pod_name,
)
self._start_watching_pods(replace=True)
raise
pod = self.pod_reflector.pods[self.pod_name]
self.pod_id = pod.metadata.uid
if self.event_reflector:
self.log.debug(
'pod %s events before launch: %s',
self.pod_name,
"\n".join(
[
"%s [%s] %s" % (event.last_timestamp or event.event_time, event.type, event.message)
for event in self.events
]
),
)
return (pod.status.pod_ip, self.port)
@gen.coroutine
def stop(self, now=False):
delete_options = client.V1DeleteOptions()
if now:
grace_seconds = 0
else:
grace_seconds = self.delete_grace_period
delete_options.grace_period_seconds = grace_seconds
self.log.info("Deleting pod %s", self.pod_name)
try:
yield self.asynchronize(
self.api.delete_namespaced_pod,
name=self.pod_name,
namespace=self.namespace,
body=delete_options,
grace_period_seconds=grace_seconds,
)
except ApiException as e:
if e.status == 404:
self.log.warning(
"No pod %s to delete. Assuming already deleted.",
self.pod_name,
)
else:
raise
try:
yield exponential_backoff(
lambda: self.pod_reflector.pods.get(self.pod_name, None) is None,
'pod/%s did not disappear in %s seconds!' % (self.pod_name, self.start_timeout),
timeout=self.start_timeout,
)
except TimeoutError:
self.log.error("Pod %s did not disappear, restarting pod reflector", self.pod_name)
self._start_watching_pods(replace=True)
raise
@default('env_keep')
def _env_keep_default(self):
return []
_profile_list = None
def _render_options_form(self, profile_list):
self._profile_list = self._init_profile_list(profile_list)
profile_form_template = Environment(loader=BaseLoader).from_string(self.profile_form_template)
return profile_form_template.render(profile_list=self._profile_list)
@gen.coroutine
def _render_options_form_dynamically(self, current_spawner):
profile_list = yield gen.maybe_future(self.profile_list(current_spawner))
profile_list = self._init_profile_list(profile_list)
return self._render_options_form(profile_list)
@default('options_form')
def _options_form_default(self):
'''
Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined.
'''
if not self.profile_list:
return ''
if callable(self.profile_list):
return self._render_options_form_dynamically
else:
return self._render_options_form(self.profile_list)
def options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile', [None])[0]
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "cpus-8"}``
"""
return {
'profile': formdata.get('profile', [None])[0]
}
@gen.coroutine
def _load_profile(self, slug):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['slug'] == slug:
break
else:
if slug:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
slug, ', '.join(p['slug'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v)
# set of recognised user option keys
# used for warning about ignoring unrecognised options
_user_option_keys = {'profile',}
def _init_profile_list(self, profile_list):
# generate missing slug fields from display_name
for profile in profile_list:
if 'slug' not in profile:
profile['slug'] = slugify(profile['display_name'])
return profile_list
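# A hedged sketch of a profile_list entry as consumed by
# _init_profile_list and _load_profile above (the concrete values are
# illustrative assumptions):
#
#     c.KubeSpawner.profile_list = [
#         {
#             'display_name': 'Training environment',
#             'slug': 'training',  # generated from display_name if omitted
#             'default': True,
#             'kubespawner_override': {
#                 'image': 'training/python:label',  # hypothetical image
#             },
#         },
#     ]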
@gen.coroutine
def load_user_options(self):
"""Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options.
"""
if self._profile_list is None:
if callable(self.profile_list):
profile_list = yield gen.maybe_future(self.profile_list(self))
else:
profile_list = self.profile_list
self._profile_list = self._init_profile_list(profile_list)
selected_profile = self.user_options.get('profile', None)
if self._profile_list:
yield self._load_profile(selected_profile)
elif selected_profile:
self.log.warning("Profile %r requested, but profiles are not enabled", selected_profile)
# help debugging by logging any option fields that are not recognized
option_keys = set(self.user_options)
unrecognized_keys = option_keys.difference(self._user_option_keys)
if unrecognized_keys:
self.log.warning(
"Ignoring unrecognized KubeSpawner user_options: %s",
", ".join(
map(
str,
sorted(unrecognized_keys)
)
)
)
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4094_0 |
crossvul-python_data_bad_4366_4 | """
Custom Authenticator to use GitLab OAuth with JupyterHub
"""
import json
import os
import re
import sys
import warnings
from urllib.parse import quote
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.escape import url_escape
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set, CUnicode, Unicode, default, observe
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}".format(access_token),
}
class GitLabOAuthenticator(OAuthenticator):
# see gitlab_scopes.md for details about scope config
# set scopes via config, e.g.
# c.GitLabOAuthenticator.scope = ['read_user']
_deprecated_aliases = {
"gitlab_group_whitelist": ("allowed_gitlab_groups", "0.12.0"),
"gitlab_project_id_whitelist": ("allowed_project_ids", "0.12.0")
}
@observe(*list(_deprecated_aliases))
def _deprecated_trait(self, change):
super()._deprecated_trait(change)
login_service = "GitLab"
client_id_env = 'GITLAB_CLIENT_ID'
client_secret_env = 'GITLAB_CLIENT_SECRET'
gitlab_url = Unicode("https://gitlab.com", config=True)
@default("gitlab_url")
def _default_gitlab_url(self):
"""get default gitlab url from env"""
gitlab_url = os.getenv('GITLAB_URL')
gitlab_host = os.getenv('GITLAB_HOST')
if not gitlab_url and gitlab_host:
warnings.warn(
'Use of GITLAB_HOST might be deprecated in the future. '
'Rename GITLAB_HOST environment variable to GITLAB_URL.',
PendingDeprecationWarning,
)
if gitlab_host.startswith(('https:', 'http:')):
gitlab_url = gitlab_host
else:
# Handle the common mistake of setting GITLAB_HOST
# without a protocol specification.
gitlab_url = 'https://{0}'.format(gitlab_host)
warnings.warn(
'The https:// prefix has been added to GITLAB_HOST. '
'Set GITLAB_URL="{0}" instead.'.format(gitlab_url)
)
# default to gitlab.com
if not gitlab_url:
gitlab_url = 'https://gitlab.com'
return gitlab_url
gitlab_api_version = CUnicode('4', config=True)
@default('gitlab_api_version')
def _gitlab_api_version_default(self):
return os.environ.get('GITLAB_API_VERSION') or '4'
gitlab_api = Unicode(config=True)
@default("gitlab_api")
def _default_gitlab_api(self):
return '%s/api/v%s' % (self.gitlab_url, self.gitlab_api_version)
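# e.g. with the defaults above, gitlab_api resolves to
# "https://gitlab.com/api/v4".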
@default("authorize_url")
def _authorize_url_default(self):
return "%s/oauth/authorize" % self.gitlab_url
@default("token_url")
def _token_url_default(self):
return "%s/oauth/access_token" % self.gitlab_url
gitlab_group_whitelist = Set(help="Deprecated, use `GitLabOAuthenticator.allowed_gitlab_groups`", config=True,)
allowed_gitlab_groups = Set(
config=True, help="Automatically allow members of selected groups"
)
gitlab_project_id_whitelist = Set(help="Deprecated, use `GitLabOAuthenticator.allowed_project_ids`", config=True,)
allowed_project_ids = Set(
config=True,
help="Automatically allow members with Developer access to selected project ids",
)
gitlab_version = None
async def authenticate(self, handler, data=None):
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitLab Access Token
#
# See: https://github.com/gitlabhq/gitlabhq/blob/master/doc/api/oauth2.md
# GitLab specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
code=code,
grant_type="authorization_code",
redirect_uri=self.get_callback_url(handler),
)
validate_server_cert = self.validate_server_cert
url = url_concat("%s/oauth/token" % self.gitlab_url, params)
req = HTTPRequest(
url,
method="POST",
headers={"Accept": "application/json"},
validate_cert=validate_server_cert,
body='', # Body is required for a POST...
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# memoize gitlab version for class lifetime
if self.gitlab_version is None:
self.gitlab_version = await self._get_gitlab_version(access_token)
self.member_api_variant = 'all/' if self.gitlab_version >= [12, 4] else ''
# Determine who the logged in user is
req = HTTPRequest(
"%s/user" % self.gitlab_api,
method="GET",
validate_cert=validate_server_cert,
headers=_api_headers(access_token),
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
user_id = resp_json["id"]
is_admin = resp_json.get("is_admin", False)
# Check if user is a member of any allowed groups or projects.
# These checks are performed here, as it requires `access_token`.
user_in_group = user_in_project = False
is_group_specified = is_project_id_specified = False
if self.allowed_gitlab_groups:
is_group_specified = True
user_in_group = await self._check_membership_allowed_groups(user_id, access_token)
# We skip project_id check if user is in allowed group.
if self.allowed_project_ids and not user_in_group:
is_project_id_specified = True
user_in_project = await self._check_membership_allowed_project_ids(
user_id, access_token
)
no_config_specified = not (is_group_specified or is_project_id_specified)
if (
(is_group_specified and user_in_group)
or (is_project_id_specified and user_in_project)
or no_config_specified
):
return {
'name': username,
'auth_state': {'access_token': access_token, 'gitlab_user': resp_json},
}
else:
self.log.warning("%s not in group or project allowed list", username)
return None
async def _get_gitlab_version(self, access_token):
url = '%s/version' % self.gitlab_api
req = HTTPRequest(
url,
method="GET",
headers=_api_headers(access_token),
validate_cert=self.validate_server_cert,
)
resp = await AsyncHTTPClient().fetch(req, raise_error=True)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
version_strings = resp_json['version'].split('-')[0].split('.')[:3]
version_ints = list(map(int, version_strings))
return version_ints
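# For example (illustrative): a version string like "12.4.0-pre" is
# split into [12, 4, 0], so the lexicographic list comparison
# `self.gitlab_version >= [12, 4]` in authenticate() works as intended.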
async def _check_membership_allowed_groups(self, user_id, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check if user is a member of any group in the allowed list
# NOTE: quote() below handles URL-escaping of the group path;
# escaping here as well would double-encode it
for group in self.allowed_gitlab_groups:
url = "%s/groups/%s/members/%s%d" % (
self.gitlab_api,
quote(group, safe=''),
self.member_api_variant,
user_id,
)
req = HTTPRequest(url, method="GET", headers=headers)
resp = await http_client.fetch(req, raise_error=False)
if resp.code == 200:
return True # user _is_ in group
return False
async def _check_membership_allowed_project_ids(self, user_id, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check if user has developer access to any project in the allowed list
for project in self.allowed_project_ids:
url = "%s/projects/%s/members/%s%d" % (
self.gitlab_api,
project,
self.member_api_variant,
user_id,
)
req = HTTPRequest(url, method="GET", headers=headers)
resp = await http_client.fetch(req, raise_error=False)
if resp.body:
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_level = resp_json.get('access_level', 0)
# We only allow access level Developer and above
# Reference: https://docs.gitlab.com/ee/api/members.html
if resp.code == 200 and access_level >= 30:
return True
return False
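# For reference (per the GitLab members API docs linked above), access
# levels are: 10 Guest, 20 Reporter, 30 Developer, 40 Maintainer,
# 50 Owner; hence the `access_level >= 30` Developer-and-above check.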
class LocalGitLabOAuthenticator(LocalAuthenticator, GitLabOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4366_4 |
crossvul-python_data_good_4366_3 | """
Authenticator to use GitHub OAuth with JupyterHub
"""
import json
import os
import re
import string
import warnings
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient, HTTPError
from jupyterhub.auth import LocalAuthenticator
from traitlets import List, Set, Unicode, default, observe
from .common import next_page_from_links
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "token {}".format(access_token),
}
class GitHubOAuthenticator(OAuthenticator):
# see github_scopes.md for details about scope config
# set scopes via config, e.g.
# c.GitHubOAuthenticator.scope = ['read:org']
_deprecated_oauth_aliases = {
"github_organization_whitelist": ("allowed_organizations", "0.12.0"),
**OAuthenticator._deprecated_oauth_aliases,
}
login_service = "GitHub"
github_url = Unicode("https://github.com", config=True)
@default("github_url")
def _github_url_default(self):
github_url = os.environ.get("GITHUB_URL")
if not github_url:
# fallback on older GITHUB_HOST config,
# treated the same as GITHUB_URL
host = os.environ.get("GITHUB_HOST")
if host:
if os.environ.get("GITHUB_HTTP"):
protocol = "http"
warnings.warn(
'Use of GITHUB_HOST with GITHUB_HTTP might be deprecated in the future. '
'Use GITHUB_URL=http://{} to set host and protocol together.'.format(
host
),
PendingDeprecationWarning,
)
else:
protocol = "https"
github_url = "{}://{}".format(protocol, host)
if github_url:
if '://' not in github_url:
# ensure protocol is included, assume https if missing
github_url = 'https://' + github_url
return github_url
else:
# nothing specified, this is the true default
github_url = "https://github.com"
# ensure no trailing slash
return github_url.rstrip("/")
github_api = Unicode("https://api.github.com", config=True)
@default("github_api")
def _github_api_default(self):
if self.github_url == "https://github.com":
return "https://api.github.com"
else:
return self.github_url + "/api/v3"
@default("authorize_url")
def _authorize_url_default(self):
return "%s/login/oauth/authorize" % (self.github_url)
@default("token_url")
def _token_url_default(self):
return "%s/login/oauth/access_token" % (self.github_url)
# deprecated names
github_client_id = Unicode(config=True, help="DEPRECATED")
def _github_client_id_changed(self, name, old, new):
self.log.warning("github_client_id is deprecated, use client_id")
self.client_id = new
github_client_secret = Unicode(config=True, help="DEPRECATED")
def _github_client_secret_changed(self, name, old, new):
self.log.warning("github_client_secret is deprecated, use client_secret")
self.client_secret = new
client_id_env = 'GITHUB_CLIENT_ID'
client_secret_env = 'GITHUB_CLIENT_SECRET'
github_organization_whitelist = Set(help="Deprecated, use `GitHubOAuthenticator.allowed_organizations`", config=True,)
allowed_organizations = Set(
config=True, help="Automatically allow members of selected organizations"
)
async def authenticate(self, handler, data=None):
"""We set up auth_state based on additional GitHub info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitHub Access Token
#
# See: https://developer.github.com/v3/oauth/
# GitHub specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id, client_secret=self.client_secret, code=code
)
url = url_concat(self.token_url, params)
req = HTTPRequest(
url,
method="POST",
headers={"Accept": "application/json"},
body='', # Body is required for a POST...
validate_cert=self.validate_server_cert,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
if 'access_token' in resp_json:
access_token = resp_json['access_token']
elif 'error_description' in resp_json:
raise HTTPError(
403,
"An access token was not returned: {}".format(
resp_json['error_description']
),
)
else:
raise HTTPError(500, "Bad response: {}".format(resp))
# Determine who the logged in user is
req = HTTPRequest(
self.github_api + "/user",
method="GET",
headers=_api_headers(access_token),
validate_cert=self.validate_server_cert,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["login"]
# username is now the GitHub login (the user's handle, not the numeric id).
if not username:
return None
# Check if user is a member of any allowed organizations.
# This check is performed here, as it requires `access_token`.
if self.allowed_organizations:
for org in self.allowed_organizations:
user_in_org = await self._check_membership_allowed_organizations(
org, username, access_token
)
if user_in_org:
break
else: # User not found in member list for any organisation
self.log.warning("User %s is not in allowed org list", username)
return None
userdict = {"name": username}
# Now we set up auth_state
userdict["auth_state"] = auth_state = {}
# Save the access token and full GitHub reply (name, id, email) in auth state
# These can be used for user provisioning in the Lab/Notebook environment.
# e.g.
# 1) stash the access token
# 2) use the GitHub ID as the id
# 3) set up name/email for .gitconfig
auth_state['access_token'] = access_token
# store the whole user model in auth_state.github_user
auth_state['github_user'] = resp_json
# A public email will be returned in the initial query (assuming default scope);
# a private one will not.
return userdict
async def _check_membership_allowed_organizations(self, org, username, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check membership of user `username` for organization `org` via api [check-membership](https://developer.github.com/v3/orgs/members/#check-membership)
# With empty scope (even if authenticated by an org member), this
# will only see public org members. You want 'read:org' in order
# to be able to iterate through all members.
check_membership_url = "%s/orgs/%s/members/%s" % (
self.github_api,
org,
username,
)
req = HTTPRequest(
check_membership_url,
method="GET",
headers=headers,
validate_cert=self.validate_server_cert,
)
self.log.debug(
"Checking GitHub organization membership: %s in %s?", username, org
)
resp = await http_client.fetch(req, raise_error=False)
if resp.code == 204:
self.log.info("Allowing %s as member of %s", username, org)
return True
else:
try:
resp_json = json.loads((resp.body or b'').decode('utf8', 'replace'))
message = resp_json.get('message', '')
except ValueError:
message = ''
self.log.debug(
"%s does not appear to be a member of %s (status=%s): %s",
username,
org,
resp.code,
message,
)
return False
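# A configuration sketch enabling the membership check above (the org
# name is illustrative; see the scope note in the comments above):
#
#     c.GitHubOAuthenticator.allowed_organizations = {'my-org'}
#     c.GitHubOAuthenticator.scope = ['read:org']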
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4366_3 |
crossvul-python_data_good_4094_0 | """
JupyterHub Spawner to spawn user notebooks on a Kubernetes cluster.
This module exports `KubeSpawner` class, which is the actual spawner
implementation that should be used by JupyterHub.
"""
from functools import partial # noqa
from datetime import datetime
import json
import os
import sys
import string
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import warnings
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.concurrent import run_on_executor
from tornado import web
from traitlets import (
Bool,
Dict,
Integer,
List,
Unicode,
Union,
default,
observe,
validate,
)
from jupyterhub.spawner import Spawner
from jupyterhub.utils import exponential_backoff
from jupyterhub.traitlets import Command
from kubernetes.client.rest import ApiException
from kubernetes import client
import escapism
from jinja2 import Environment, BaseLoader
from .clients import shared_client
from kubespawner.traitlets import Callable
from kubespawner.objects import make_pod, make_pvc
from kubespawner.reflector import NamespacedResourceReflector
from asyncio import sleep
from async_generator import async_generator, yield_
from slugify import slugify
class PodReflector(NamespacedResourceReflector):
"""
PodReflector is merely a configured NamespacedResourceReflector. It exposes
the pods property, which is simply mapping to self.resources where the
NamespacedResourceReflector keeps an updated list of the resource defined by
the `kind` field and the `list_method_name` field.
"""
kind = 'pods'
list_method_name = 'list_namespaced_pod'
# FUTURE: These labels are the selection labels for the PodReflector. We
# might want to support multiple deployments in the same namespace, so we
# would need to select based on additional labels such as `app` and
# `release`.
labels = {
'component': 'singleuser-server',
}
@property
def pods(self):
"""
A dictionary of the python kubernetes client's representation of pods
for the namespace. The dictionary keys are the pod ids and the values
are the actual pod resource representations.
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#pod-v1-core
"""
return self.resources
class EventReflector(NamespacedResourceReflector):
"""
EventsReflector is merely a configured NamespacedResourceReflector. It
exposes the events property, which is simply mapping to self.resources where
the NamespacedResourceReflector keeps an updated list of the resource
defined by the `kind` field and the `list_method_name` field.
"""
kind = 'events'
list_method_name = 'list_namespaced_event'
@property
def events(self):
"""
Returns list of the python kubernetes client's representation of k8s
events within the namespace, sorted by the latest event.
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#event-v1-core
"""
# NOTE:
# - self.resources is a dictionary with keys mapping unique ids of
# Kubernetes Event resources, updated by NamespacedResourceReflector.
# self.resources builds up with incoming k8s events, but can also
# suddenly refresh itself entirely. We should not assume a call to
# this dictionary's values will result in a consistently ordered list,
# so we sort it to get it somewhat more structured.
# - We seem to get only one of event.last_timestamp or event.event_time;
# both fields serve the same role, but the former is a low-resolution
# timestamp and the latter is a higher-resolution one.
return sorted(
self.resources.values(),
key=lambda event: event.last_timestamp or event.event_time,
)
class MockObject(object):
pass
class KubeSpawner(Spawner):
"""
A JupyterHub spawner that spawns pods in a Kubernetes cluster. Each server
spawned by a user will have its own KubeSpawner instance.
"""
# We want to have one single threadpool executor that is shared across all
# KubeSpawner instances, so we apply a Singleton pattern. We initialize this
# class variable from the first KubeSpawner instance that is created and
# then reference it from all instances. The same goes for the PodReflector
# and EventReflector.
executor = None
reflectors = {
"pods": None,
"events": None,
}
@property
def pod_reflector(self):
"""
A convenience alias for the class variable reflectors['pods'].
"""
return self.__class__.reflectors['pods']
@property
def event_reflector(self):
"""
A convenience alias for the class variable reflectors['events'] if the
spawner instance has events_enabled.
"""
if self.events_enabled:
return self.__class__.reflectors['events']
def __init__(self, *args, **kwargs):
_mock = kwargs.pop('_mock', False)
super().__init__(*args, **kwargs)
if _mock:
# runs during test execution only
if 'user' not in kwargs:
user = MockObject()
user.name = 'mock_name'
user.id = 'mock_id'
user.url = 'mock_url'
self.user = user
if 'hub' not in kwargs:
hub = MockObject()
hub.public_host = 'mock_public_host'
hub.url = 'mock_url'
hub.base_url = 'mock_base_url'
hub.api_url = 'mock_api_url'
self.hub = hub
else:
# runs during normal execution only
# By now, all the traitlets have been set, so we can use them to compute
# other attributes
if self.__class__.executor is None:
self.__class__.executor = ThreadPoolExecutor(
max_workers=self.k8s_api_threadpool_workers
)
# This will start watching in __init__, so it'll start the first
# time any spawner object is created. Not ideal but works!
self._start_watching_pods()
if self.events_enabled:
self._start_watching_events()
self.api = shared_client('CoreV1Api')
# runs during both test and normal execution
self.pod_name = self._expand_user_properties(self.pod_name_template)
self.pvc_name = self._expand_user_properties(self.pvc_name_template)
if self.working_dir:
self.working_dir = self._expand_user_properties(self.working_dir)
if self.port == 0:
# Our default port is 8888
self.port = 8888
k8s_api_threadpool_workers = Integer(
# Set this explicitly, since this is the default in Python 3.5+
# but not in 3.4
5 * multiprocessing.cpu_count(),
config=True,
help="""
Number of threads in thread pool used to talk to the k8s API.
Increase this if you are dealing with a very large number of users.
Defaults to `5 * cpu_cores`, which is the default for `ThreadPoolExecutor`.
"""
)
events_enabled = Bool(
True,
config=True,
help="""
Enable event-watching for progress-reports to the user spawn page.
Disable if these events are not desirable
or to save some performance cost.
"""
)
namespace = Unicode(
config=True,
help="""
Kubernetes namespace to spawn user pods in.
If running inside a kubernetes cluster with service accounts enabled,
defaults to the current namespace. If not, defaults to `default`
"""
)
@default('namespace')
def _namespace_default(self):
"""
Set namespace default to current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
`default`
"""
ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return 'default'
ip = Unicode(
'0.0.0.0',
config=True,
help="""
The IP address (or hostname) the single-user server should listen on.
We override this from the parent so we can set a more sane default for
the Kubernetes setup.
"""
)
cmd = Command(
None,
allow_none=True,
minlen=0,
config=True,
help="""
The command used for starting the single-user server.
Provide either a string or a list containing the path to the startup script command. Extra arguments,
other than this path, should be provided via `args`.
This is usually set if you want to start the single-user server in a different python
environment (with virtualenv/conda) than JupyterHub itself.
Some spawners allow shell-style expansion here, allowing you to use environment variables.
Most, including the default, do not. Consult the documentation for your spawner to verify!
If set to `None`, Kubernetes will start the `CMD` that is specified in the Docker image being started.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
working_dir = Unicode(
None,
allow_none=True,
config=True,
help="""
The working directory where the Notebook server will be started inside the container.
Defaults to `None` so the working directory will be the one defined in the Dockerfile.
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
service_account = Unicode(
None,
allow_none=True,
config=True,
help="""
The service account to be mounted in the spawned user pod.
When set to `None` (the default), no service account is mounted, and the default service account
is explicitly disabled.
This `serviceaccount` must already exist in the namespace the user pod is being spawned in.
WARNING: Be careful with this configuration! Make sure the service account being mounted
has the minimal permissions needed, and nothing more. When misconfigured, this can easily
give arbitrary users root over your entire cluster.
"""
)
pod_name_template = Unicode(
'jupyter-{username}--{servername}',
config=True,
help="""
Template to use to form the name of user's pods.
`{username}` is expanded to the escaped, dns-label-safe username.
`{servername}` is expanded to the escaped, dns-label-safe server name, if any.
Trailing `-` characters are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pods are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
.. versionchanged:: 0.12
`--` delimiter added to the template,
where it was implicitly added to the `servername` field before.
Additionally, `username--servername` delimiter was `-` instead of `--`,
allowing collisions in certain circumstances.
"""
)
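# Illustrative expansions of the default template above (assuming
# simple, already dns-safe names): user "alice" with named server "ml"
# yields "jupyter-alice--ml"; her default (unnamed) server yields
# "jupyter-alice" after trailing '-' characters are stripped.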
storage_pvc_ensure = Bool(
False,
config=True,
help="""
Ensure that a PVC exists for each user before spawning.
Set to true to create a PVC named with `pvc_name_template` if it does
not exist for the user when their pod is spawning.
"""
)
pvc_name_template = Unicode(
'claim-{username}--{servername}',
config=True,
help="""
Template to use to form the name of user's pvc.
`{username}` is expanded to the escaped, dns-label safe username.
`{servername}` is expanded to the escaped, dns-label-safe server name, if any.
Trailing `-` characters are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pvc are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
.. versionchanged:: 0.12
`--` delimiter added to the template,
where it was implicitly added to the `servername` field before.
Additionally, `username--servername` delimiter was `-` instead of `--`,
allowing collisions in certain circumstances.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
hub_connect_ip = Unicode(
allow_none=True,
config=True,
help="""DEPRECATED. Use c.JupyterHub.hub_connect_ip"""
)
hub_connect_port = Integer(
config=True,
help="""DEPRECATED. Use c.JupyterHub.hub_connect_url"""
)
@observe('hub_connect_ip', 'hub_connect_port')
def _deprecated_changed(self, change):
warnings.warn("""
KubeSpawner.{0} is deprecated with JupyterHub >= 0.8.
Use JupyterHub.{0}
""".format(change.name),
DeprecationWarning)
setattr(self.hub, change.name.split('_', 1)[1], change.new)
common_labels = Dict(
{
'app': 'jupyterhub',
'heritage': 'jupyterhub',
},
config=True,
help="""
Kubernetes labels that both spawned singleuser server pods and created
user PVCs will get.
Note that these are only set when the Pods and PVCs are created, not
later when this setting is updated.
"""
)
extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the spawned single-user pods.
The keys and values specified here would be set as labels on the spawned single-user
kubernetes pods. The keys and values must both be strings that match the kubernetes
label key / value constraints.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
extra_annotations = Dict(
config=True,
help="""
Extra Kubernetes annotations to set on the spawned single-user pods.
The keys and values specified here are added as annotations on the spawned single-user
kubernetes pods. The keys and values must both be strings.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__
for more info on what annotations are and why you might want to use them!
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
image = Unicode(
'jupyterhub/singleuser:latest',
config=True,
help="""
Docker image to use for spawning user's containers.
Defaults to `jupyterhub/singleuser:latest`
Name of the image + a tag, same as would be used with
a `docker pull` command. If the tag is set to `latest`, kubernetes will
check the registry each time a new user is spawned to see if there
is a newer image available. If so, the new image will be pulled.
Note that this could cause long delays when spawning, especially
if the image is large. If you do not specify a tag, whatever version
of the image is first pulled on the node will be used, thus possibly
leading to inconsistent images on different nodes. For all these
reasons, it is recommended to specify a specific immutable tag
for the image.
If your image is very large, you might need to increase the timeout
for starting the single user container from the default. You can
set this with::
c.KubeSpawner.start_timeout = 60 * 5 # Up to 5 minutes
"""
)
image_pull_policy = Unicode(
'IfNotPresent',
config=True,
help="""
The image pull policy of the docker container specified in
`image`.
Defaults to `IfNotPresent` which causes the Kubelet to NOT pull the image
specified in KubeSpawner.image if it already exists, except if the tag
is `:latest`. For more information on image pull policy,
refer to `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/images/>`__.
This configuration is primarily used in development if you are
actively changing the `image` and would like to pull the image
whenever a user container is spawned.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
image_pull_secrets = Unicode(
None,
allow_none=True,
config=True,
help="""
The kubernetes secret to use for pulling images from private repository.
Set this to the name of a Kubernetes secret containing the docker configuration
required to pull the image.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod>`__
for more information on when and why this might need to be set, and what
it should be set to.
"""
)
node_selector = Dict(
config=True,
help="""
Dictionary of selector labels used to match the Nodes where Pods will be launched.
Default is empty, which means pods will be launched on any available Node.
For example to match the Nodes that have a label of `disktype: ssd` use::
c.KubeSpawner.node_selector = {'disktype': 'ssd'}
"""
)
uid = Union(
trait_types=[
Integer(),
Callable(),
],
allow_none=True,
config=True,
help="""
The UID to run the single-user server containers as.
This UID should ideally map to a user that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the user specified with the `USER` directive in the
container metadata is used.
"""
)
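# A hedged sketch of a callable uid (the lookup helper is hypothetical):
#
#     def custom_uid(spawner):
#         # e.g. resolve the UID from an external directory service
#         return lookup_uid_for(spawner.user.name)  # hypothetical helper
#
#     c.KubeSpawner.uid = custom_uid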
gid = Union(
trait_types=[
Integer(),
Callable(),
],
allow_none=True,
config=True,
help="""
The GID to run the single-user server containers as.
This GID should ideally map to a group that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the group of the user specified with the `USER` directive
in the container metadata is used.
"""
)
fs_gid = Union(
trait_types=[
Integer(),
Callable(),
],
allow_none=True,
config=True,
help="""
The GID of the group that should own any volumes that are created & mounted.
A special supplemental group that applies primarily to the volumes mounted
in the single-user server. In volumes from supported providers, the following
things happen:
1. The owning GID will be this GID
2. The setgid bit is set (new files created in the volume will be owned by
this GID)
3. The permission bits are OR'd with rw-rw----
The single-user server will also be run with this gid as part of its supplemental
groups.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable will
be called asynchronously if it returns a future, rather than an int. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
You'll *have* to set this if you are using auto-provisioned volumes with most
cloud providers. See `fsGroup <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podsecuritycontext-v1-core>`_
for more details.
"""
)
supplemental_gids = Union(
trait_types=[
List(),
Callable(),
],
config=True,
help="""
A list of GIDs that should be set as additional supplemental groups to the
user that the container runs as.
Instead of a list of integers, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of integers. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
You may have to set this if you are deploying to an environment with RBAC/SCC
enforced and pods run with a 'restricted' SCC which results in the image being
run as an assigned user ID. The supplemental group IDs would need to include
the corresponding group ID of the user ID the image normally would run as. The
image must set up all directories/files that any application needs access to
as group-writable.
"""
)
privileged = Bool(
False,
config=True,
help="""
Whether to run the pod with a privileged security context.
"""
)
modify_pod_hook = Callable(
None,
allow_none=True,
config=True,
help="""
Callable to augment the Pod object before launching.
Expects a callable that takes two parameters:
1. The spawner object that is doing the spawning
2. The Pod object that is to be launched
You should modify the Pod object and return it.
This can be a coroutine if necessary. When set to None, no augmenting is done.
This is very useful if you want to modify the pod being launched dynamically.
Note that the spawner object can change between versions of KubeSpawner and JupyterHub,
so be careful relying on this!
"""
)
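# A minimal modify_pod_hook sketch (attribute names follow the
# kubernetes python client's V1Pod model; the label value is
# illustrative):
#
#     def add_team_label(spawner, pod):
#         pod.metadata.labels['team'] = 'data-science'
#         return pod
#
#     c.KubeSpawner.modify_pod_hook = add_team_label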
volumes = List(
config=True,
help="""
List of Kubernetes Volume specifications that will be mounted in the user pod.
This list will be directly added under `volumes` in the kubernetes pod spec,
so you should use the same structure. Each item in the list must have the
following two keys:
- `name`
Name that'll be later used in the `volume_mounts` config to mount this
volume at a specific path.
- `<name-of-a-supported-volume-type>` (such as `hostPath`, `persistentVolumeClaim`,
etc)
The key name determines the type of volume to mount, and the value should
be an object specifying the various options available for that kind of
volume.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on the various kinds of volumes available and their options.
Your kubernetes cluster must already be configured to support the volume types you want to use.
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
volume_mounts = List(
config=True,
help="""
List of paths on which to mount volumes in the user notebook's pod.
This list will be added to the values of the `volumeMounts` key under the user's
container in the kubernetes pod spec, so you should use the same structure as that.
Each item in the list should be a dictionary with at least these two keys:
- `mountPath` The path on the container in which we want to mount the volume.
- `name` The name of the volume we want to mount, as specified in the `volumes` config.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on how the `volumeMount` item works.
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_capacity = Unicode(
None,
config=True,
allow_none=True,
help="""
The amount of storage space to request from the volume that the pvc will
mount to. This amount will be the amount of storage space the user has
to work with on their notebook. If left blank, the kubespawner will not
create a pvc for the pod.
This will be added to the `resources: requests: storage:` in the k8s pod spec.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims>`__
for more information on how storage works.
Quantities can be represented externally as unadorned integers, or as fixed-point
integers with one of these SI suffixes (`E, P, T, G, M, K, m`) or their power-of-two
equivalents (`Ei, Pi, Ti, Gi, Mi, Ki`). For example, the following represent roughly
the same value: `128974848`, `129e6`, `129M`, `123Mi`.
"""
)
storage_extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the user PVCs.
The keys and values specified here would be set as labels on the PVCs
created by kubespawner for the user. Note that these are only set
when the PVC is created, not later when this setting is updated.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_class = Unicode(
None,
config=True,
allow_none=True,
help="""
The storage class that the pvc will use.
This will be added to the `annotations: volume.beta.kubernetes.io/storage-class:`
in the pvc metadata.
This will determine what type of volume the pvc will request to use. If one exists
that matches the criteria of the StorageClass, the pvc will mount to that. Otherwise,
because it has a storage class, k8s will dynamically spawn a pv for the pvc to bind to
and a machine in the cluster for the pv to bind to.
Note that an empty string is a valid value and is always interpreted to be
requesting a pv with no class.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/storage-classes/>`__
for more information on how StorageClasses work.
"""
)
storage_access_modes = List(
["ReadWriteOnce"],
config=True,
help="""
List of access modes the user has for the pvc.
The access modes are:
- `ReadWriteOnce` – the volume can be mounted as read-write by a single node
- `ReadOnlyMany` – the volume can be mounted read-only by many nodes
- `ReadWriteMany` – the volume can be mounted as read-write by many nodes
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes>`__
for more information on how access modes work.
"""
)
storage_selector = Dict(
config=True,
help="""
The dictionary Selector labels used to match a PersistentVolumeClaim to
a PersistentVolume.
Default is None and means it will match based only on other storage criteria.
For example to match the Nodes that have a label of `content: jupyter` use::
c.KubeSpawner.storage_selector = {'matchLabels':{'content': 'jupyter'}}
"""
)
lifecycle_hooks = Dict(
config=True,
help="""
Kubernetes lifecycle hooks to set on the spawned single-user pods.
The keys are the hook names; there are only two hooks, postStart and preStop.
The values are the hook handlers, which the Kubernetes management system executes when the hook is called.
Below is a sample copied from
`the Kubernetes documentation <https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/>`__::
c.KubeSpawner.lifecycle_hooks = {
"postStart": {
"exec": {
"command": ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
}
},
"preStop": {
"exec": {
"command": ["/usr/sbin/nginx", "-s", "quit"]
}
}
}
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/>`__
for more info on what lifecycle hooks are and why you might want to use them!
"""
)
init_containers = List(
config=True,
help="""
List of initialization containers belonging to the pod.
This list will be directly added under `initContainers` in the kubernetes pod spec,
so you should use the same structure. Each item in the list must be a valid field
of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#container-v1-core>`_.
One use is disabling access to the metadata service from the single-user
notebook server with the configuration below::
c.KubeSpawner.init_containers = [{
"name": "init-iptables",
"image": "<image with iptables installed>",
"command": ["iptables", "-A", "OUTPUT", "-p", "tcp", "--dport", "80", "-d", "169.254.169.254", "-j", "DROP"],
"securityContext": {
"capabilities": {
"add": ["NET_ADMIN"]
}
}
}]
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/workloads/pods/init-containers/>`__
for more info on what init containers are and why you might want to use them!
To use this feature, the Kubernetes version must be greater than 1.6.
"""
)
extra_container_config = Dict(
config=True,
help="""
Extra configuration (e.g. ``envFrom``) for notebook container which is not covered by other attributes.
This dict will be merged directly into the notebook server's `container` spec,
so you should use the same structure. Each key in the dict must be a field
of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#container-v1-core>`_.
One use is setting ``envFrom`` on the notebook container, with the configuration below::
c.KubeSpawner.extra_container_config = {
"envFrom": [{
"configMapRef": {
"name": "special-config"
}
}]
}
The key could be either a camelCase word (used by Kubernetes yaml, e.g.
``envFrom``) or a snake_case word (used by Kubernetes Python client,
e.g. ``env_from``).
"""
)
extra_pod_config = Dict(
config=True,
help="""
Extra configuration for the pod which is not covered by other attributes.
This dict will be merged directly into the pod spec, so you should use the same structure.
Each item in the dict is a field of the pod configuration,
which follows the spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core.
One use is setting restartPolicy and dnsPolicy, with the configuration below::
c.KubeSpawner.extra_pod_config = {
"restartPolicy": "OnFailure",
"dns_policy": "ClusterFirstWithHostNet"
}
The `key` could be either a camelCase word (used by Kubernetes yaml,
e.g. `restartPolicy`) or a snake_case word (used by Kubernetes Python
client, e.g. `dns_policy`).
"""
)
extra_containers = List(
config=True,
help="""
List of containers belonging to the pod in addition to the container generated for the notebook server.
This list will be appended directly under `containers` in the kubernetes pod spec,
so you should use the same structure. Each item in the list is a container configuration
which follows the spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#container-v1-core.
One use is running a crontab in a sidecar container to clean up sensitive data, with the configuration below::
c.KubeSpawner.extra_containers = [{
"name": "crontab",
"image": "supercronic",
"command": ["/usr/local/bin/supercronic", "/etc/crontab"]
}]
`{username}` is expanded to the escaped, dns-label safe username.
"""
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
scheduler_name = Unicode(
None,
allow_none=True,
config=True,
help="""
Set the pod's scheduler explicitly by name. See `the Kubernetes documentation <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core>`__
for more information.
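For example, assuming a custom scheduler has already been deployed under
the (illustrative) name `my-custom-scheduler`::
c.KubeSpawner.scheduler_name = 'my-custom-scheduler'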
"""
)
tolerations = List(
config=True,
help="""
List of tolerations to assign to the pod so that it can be scheduled
on a node with the corresponding taints. See the official Kubernetes documentation for additional details:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Pass this field an array of `"Toleration" objects
<https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core>`__.
Example::
[
{
'key': 'key',
'operator': 'Equal',
'value': 'value',
'effect': 'NoSchedule'
},
{
'key': 'key',
'operator': 'Exists',
'effect': 'NoSchedule'
}
]
"""
)
node_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled; they
may prefer or require a node to have a certain label, or to be in
proximity to / remoteness from another pod. To learn more, visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PreferredSchedulingTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#preferredschedulingterm-v1-core
"""
)
node_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled; they
may prefer or require a node to have a certain label, or to be in
proximity to / remoteness from another pod. To learn more, visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "NodeSelectorTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#nodeselectorterm-v1-core
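For example, a minimal sketch requiring nodes that carry an (illustrative)
label `hub.jupyter.org/node-purpose=user`::
c.KubeSpawner.node_affinity_required = [{
    'matchExpressions': [{
        'key': 'hub.jupyter.org/node-purpose',
        'operator': 'In',
        'values': ['user'],
    }],
}]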
"""
)
pod_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled; they
may prefer or require a node to have a certain label, or to be in
proximity to / remoteness from another pod. To learn more, visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#weightedpodaffinityterm-v1-core
"""
)
pod_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled; they
may prefer or require a node to have a certain label, or to be in
proximity to / remoteness from another pod. To learn more, visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podaffinityterm-v1-core
"""
)
pod_anti_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled; they
may prefer or require a node to have a certain label, or to be in
proximity to / remoteness from another pod. To learn more, visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#weightedpodaffinityterm-v1-core
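For example, a minimal sketch that prefers spreading user pods across
nodes, keyed on the (illustrative) label `component=singleuser-server`::
c.KubeSpawner.pod_anti_affinity_preferred = [{
    'weight': 100,
    'podAffinityTerm': {
        'labelSelector': {
            'matchExpressions': [{
                'key': 'component',
                'operator': 'In',
                'values': ['singleuser-server'],
            }],
        },
        'topologyKey': 'kubernetes.io/hostname',
    },
}]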
"""
)
pod_anti_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled; they
may prefer or require a node to have a certain label, or to be in
proximity to / remoteness from another pod. To learn more, visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podaffinityterm-v1-core
"""
)
extra_resource_guarantees = Dict(
config=True,
help="""
The dictionary used to request arbitrary resources.
The default is None, meaning no additional resources are requested.
For example, to request 1 Nvidia GPU::
c.KubeSpawner.extra_resource_guarantees = {"nvidia.com/gpu": "1"}
"""
)
extra_resource_limits = Dict(
config=True,
help="""
The dictionary used to limit arbitrary resources.
The default is None, meaning no additional resource limits are set.
For example, to add a limit of 3 Nvidia GPUs::
c.KubeSpawner.extra_resource_limits = {"nvidia.com/gpu": "3"}
"""
)
delete_stopped_pods = Bool(
True,
config=True,
help="""
Whether to delete pods that have stopped themselves.
Set to False to leave stopped pods in the completed state,
allowing for easier debugging of why they may have stopped.
"""
)
profile_form_template = Unicode(
"""
<script>
// JupyterHub 0.8 applied form-control indiscriminately to all form elements.
// Can be removed once we stop supporting JupyterHub 0.8
$(document).ready(function() {
$('#kubespawner-profiles-list input[type="radio"]').removeClass('form-control');
});
</script>
<style>
/* The profile description should not be bold, even though it is inside the <label> tag */
#kubespawner-profiles-list label p {
font-weight: normal;
}
</style>
<div class='form-group' id='kubespawner-profiles-list'>
{% for profile in profile_list %}
<label for='profile-item-{{ profile.slug }}' class='form-control input-group'>
<div class='col-md-1'>
<input type='radio' name='profile' id='profile-item-{{ profile.slug }}' value='{{ profile.slug }}' {% if profile.default %}checked{% endif %} />
</div>
<div class='col-md-11'>
<strong>{{ profile.display_name }}</strong>
{% if profile.description %}
<p>{{ profile.description }}</p>
{% endif %}
</div>
</label>
{% endfor %}
</div>
""",
config=True,
help="""
Jinja2 template for constructing the profile list shown to the user.
Used when `profile_list` is set.
The contents of `profile_list` are passed in to the template.
This should be used to construct the contents of an HTML form. When
posted, this form is expected to have an item with name `profile` whose
value is the slug of the chosen profile in `profile_list`.
"""
)
profile_list = Union(
trait_types=[
List(trait=Dict()),
Callable()
],
config=True,
help="""
List of profiles to offer for selection by the user.
Signature is: `List(Dict())`, where each item is a dictionary with the following keys:
- `display_name`: the human readable display name (should be HTML safe)
- `slug`: the machine readable slug to identify the profile
(missing slugs are generated from display_name)
- `description`: Optional description of this profile displayed to the user.
- `kubespawner_override`: a dictionary with overrides to apply to the KubeSpawner
settings. Each value can be either the final value to set or a callable that
takes the `KubeSpawner` instance as a parameter and returns the final value.
- `default`: (optional Bool) True if this is the default selected option
Example::
c.KubeSpawner.profile_list = [
{
'display_name': 'Training Env - Python',
'slug': 'training-python',
'default': True,
'kubespawner_override': {
'image': 'training/python:label',
'cpu_limit': 1,
'mem_limit': '512M',
}
}, {
'display_name': 'Training Env - Datascience',
'slug': 'training-datascience',
'kubespawner_override': {
'image': 'training/datascience:label',
'cpu_limit': 4,
'mem_limit': '8G',
}
}, {
'display_name': 'DataScience - Small instance',
'slug': 'datascience-small',
'kubespawner_override': {
'image': 'datascience/small:label',
'cpu_limit': 10,
'mem_limit': '16G',
}
}, {
'display_name': 'DataScience - Medium instance',
'slug': 'datascience-medium',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
}
}, {
'display_name': 'DataScience - Medium instance (GPUx2)',
'slug': 'datascience-gpu2x',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
'extra_resource_guarantees': {"nvidia.com/gpu": "2"},
}
}
]
Instead of a list of dictionaries, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of dictionaries. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
"""
)
priority_class_name = Unicode(
config=True,
help="""
The priority class that the pods will use.
See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption for
more information on how pod priority works.
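For example, assuming a `PriorityClass` named `jupyter-high` (name
illustrative) already exists in the cluster::
c.KubeSpawner.priority_class_name = 'jupyter-high'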
"""
)
delete_grace_period = Integer(
1,
config=True,
help="""
Time in seconds for the pod to be in the `terminating` state before it is forcefully killed.
Increase this if you need more time to execute a `preStop` lifecycle hook.
See https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods for
more information on how pod termination works.
Defaults to `1`.
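For example, to give a `preStop` hook up to 30 seconds to finish::
c.KubeSpawner.delete_grace_period = 30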
"""
)
# deprecate redundant and inconsistent singleuser_ and user_ prefixes:
_deprecated_traits_09 = [
"singleuser_working_dir",
"singleuser_service_account",
"singleuser_extra_labels",
"singleuser_extra_annotations",
"singleuser_image_spec",
"singleuser_image_pull_policy",
"singleuser_image_pull_secrets",
"singleuser_node_selector",
"singleuser_uid",
"singleuser_fs_gid",
"singleuser_supplemental_gids",
"singleuser_privileged",
"singleuser_lifecycle_hooks",
"singleuser_extra_pod_config",
"singleuser_init_containers",
"singleuser_extra_container_config",
"singleuser_extra_containers",
"user_storage_class",
"user_storage_pvc_ensure",
"user_storage_capacity",
"user_storage_extra_labels",
"user_storage_access_modes",
]
# other general deprecations:
_deprecated_traits = {
'image_spec': ('image', '0.10'),
}
# add the bulk deprecations from 0.9
for _deprecated_name in _deprecated_traits_09:
_new_name = _deprecated_name.split('_', 1)[1]
_deprecated_traits[_deprecated_name] = (_new_name, '0.9')
@validate('config')
def _handle_deprecated_config(self, proposal):
config = proposal.value
if 'KubeSpawner' not in config:
# nothing to check
return config
for _deprecated_name, (_new_name, version) in self._deprecated_traits.items():
# for any `singleuser_name` deprecate in favor of `name`
if _deprecated_name not in config.KubeSpawner:
# nothing to do
continue
# remove deprecated value from config
_deprecated_value = config.KubeSpawner.pop(_deprecated_name)
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s instead",
_deprecated_name,
version,
_new_name,
)
if _new_name in config.KubeSpawner:
# *both* config values found,
# ignore deprecated config and warn about the collision
_new_value = config.KubeSpawner[_new_name]
# ignore deprecated config in favor of non-deprecated config
self.log.warning(
"Ignoring deprecated config KubeSpawner.%s = %r "
" in favor of KubeSpawner.%s = %r",
_deprecated_name,
_deprecated_value,
_new_name,
_new_value,
)
else:
# move deprecated config to its new home
config.KubeSpawner[_new_name] = _deprecated_value
return config
# define properties for deprecated names
# so we can propagate their values to the new traits.
# most deprecations should be handled via config above,
# but in case these are set at runtime, e.g. by subclasses
# or hooks, hook this up.
# The signature-order of these is funny
# because the property methods are created with
# functools.partial(f, name) so name is passed as the first arg
# before self.
def _get_deprecated(name, new_name, version, self):
# warn about the deprecated name
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
name,
version,
new_name,
)
return getattr(self, new_name)
def _set_deprecated(name, new_name, version, self, value):
# warn about the deprecated name
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
name,
version,
new_name,
)
return setattr(self, new_name, value)
for _deprecated_name, (_new_name, _version) in _deprecated_traits.items():
exec(
"""{0} = property(
partial(_get_deprecated, '{0}', '{1}', '{2}'),
partial(_set_deprecated, '{0}', '{1}', '{2}'),
)
""".format(
_deprecated_name,
_new_name,
_version,
)
)
del _deprecated_name
def _expand_user_properties(self, template):
# Make sure username and servername match the restrictions for DNS labels
# Note: '-' is not in safe_chars, as it is being used as escape character
safe_chars = set(string.ascii_lowercase + string.digits)
raw_servername = self.name or ''
safe_servername = escapism.escape(raw_servername, safe=safe_chars, escape_char='-').lower()
legacy_escaped_username = ''.join([s if s in safe_chars else '-' for s in self.user.name.lower()])
safe_username = escapism.escape(self.user.name, safe=safe_chars, escape_char='-').lower()
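# e.g. (illustrative) 'user@example.com' escapes to 'user-40example-2ecom',
# since '@' (0x40) becomes '-40' and '.' (0x2e) becomes '-2e'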
rendered = template.format(
userid=self.user.id,
username=safe_username,
unescaped_username=self.user.name,
legacy_escape_username=legacy_escaped_username,
servername=safe_servername,
unescaped_servername=raw_servername,
)
# strip trailing - delimiter in case of empty servername.
# k8s object names cannot have trailing -
return rendered.rstrip("-")
def _expand_all(self, src):
if isinstance(src, list):
return [self._expand_all(i) for i in src]
elif isinstance(src, dict):
return {k: self._expand_all(v) for k, v in src.items()}
elif isinstance(src, str):
return self._expand_user_properties(src)
else:
return src
def _build_common_labels(self, extra_labels):
# Default set of labels, picked up from
# https://github.com/kubernetes/helm/blob/master/docs/chart_best_practices/labels.md
labels = {}
labels.update(extra_labels)
labels.update(self.common_labels)
return labels
def _build_pod_labels(self, extra_labels):
labels = self._build_common_labels(extra_labels)
labels.update({
'component': 'singleuser-server'
})
return labels
def _build_common_annotations(self, extra_annotations):
# Annotations don't need to be escaped
annotations = {
'hub.jupyter.org/username': self.user.name
}
if self.name:
annotations['hub.jupyter.org/servername'] = self.name
annotations.update(extra_annotations)
return annotations
@gen.coroutine
def get_pod_manifest(self):
"""
Make a pod manifest that will spawn current user's notebook pod.
"""
if callable(self.uid):
uid = yield gen.maybe_future(self.uid(self))
else:
uid = self.uid
if callable(self.gid):
gid = yield gen.maybe_future(self.gid(self))
else:
gid = self.gid
if callable(self.fs_gid):
fs_gid = yield gen.maybe_future(self.fs_gid(self))
else:
fs_gid = self.fs_gid
if callable(self.supplemental_gids):
supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self))
else:
supplemental_gids = self.supplemental_gids
if self.cmd:
real_cmd = self.cmd + self.get_args()
else:
real_cmd = None
labels = self._build_pod_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(self._expand_all(self.extra_annotations))
return make_pod(
name=self.pod_name,
cmd=real_cmd,
port=self.port,
image=self.image,
image_pull_policy=self.image_pull_policy,
image_pull_secret=self.image_pull_secrets,
node_selector=self.node_selector,
run_as_uid=uid,
run_as_gid=gid,
fs_gid=fs_gid,
supplemental_gids=supplemental_gids,
run_privileged=self.privileged,
env=self.get_env(),
volumes=self._expand_all(self.volumes),
volume_mounts=self._expand_all(self.volume_mounts),
working_dir=self.working_dir,
labels=labels,
annotations=annotations,
cpu_limit=self.cpu_limit,
cpu_guarantee=self.cpu_guarantee,
mem_limit=self.mem_limit,
mem_guarantee=self.mem_guarantee,
extra_resource_limits=self.extra_resource_limits,
extra_resource_guarantees=self.extra_resource_guarantees,
lifecycle_hooks=self.lifecycle_hooks,
init_containers=self._expand_all(self.init_containers),
service_account=self.service_account,
extra_container_config=self.extra_container_config,
extra_pod_config=self._expand_all(self.extra_pod_config),
extra_containers=self._expand_all(self.extra_containers),
scheduler_name=self.scheduler_name,
tolerations=self.tolerations,
node_affinity_preferred=self.node_affinity_preferred,
node_affinity_required=self.node_affinity_required,
pod_affinity_preferred=self.pod_affinity_preferred,
pod_affinity_required=self.pod_affinity_required,
pod_anti_affinity_preferred=self.pod_anti_affinity_preferred,
pod_anti_affinity_required=self.pod_anti_affinity_required,
priority_class_name=self.priority_class_name,
logger=self.log,
)
def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({
'component': 'singleuser-storage'
})
annotations = self._build_common_annotations({})
return make_pvc(
name=self.pvc_name,
storage_class=self.storage_class,
access_modes=self.storage_access_modes,
selector=self.storage_selector,
storage=self.storage_capacity,
labels=labels,
annotations=annotations
)
def is_pod_running(self, pod):
"""
Check if the given pod is running
pod must be a kubernetes client object representing a Pod (e.g. ``V1Pod``).
"""
# FIXME: Validate if this is really the best way
is_running = (
pod is not None and
pod.status.phase == 'Running' and
pod.status.pod_ip is not None and
pod.metadata.deletion_timestamp is None and
all([cs.ready for cs in pod.status.container_statuses])
)
return is_running
def get_state(self):
"""
Save state required to reinstate this user's pod from scratch
We save the `pod_name`, even though we could easily compute it,
because JupyterHub requires you save *some* state! Otherwise
it assumes your server is dead. This works around that.
It's also useful for cases when the `pod_template` changes between
restarts - this keeps the old pods around.
"""
state = super().get_state()
state['pod_name'] = self.pod_name
return state
def get_env(self):
"""Return the environment dict to use for the Spawner.
See also: jupyterhub.Spawner.get_env
"""
env = super(KubeSpawner, self).get_env()
# deprecate image
env['JUPYTER_IMAGE_SPEC'] = self.image
env['JUPYTER_IMAGE'] = self.image
return env
def load_state(self, state):
"""
Load state from storage required to reinstate this user's pod
Since this runs after `__init__`, this will override the generated `pod_name`
if there's one we have saved in state. These are the same in most cases,
but if the `pod_template` has changed in between restarts, it will no longer
be the case. This allows us to continue serving from the old pods with
the old names.
"""
if 'pod_name' in state:
self.pod_name = state['pod_name']
@gen.coroutine
def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
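For example, a caller should check explicitly (illustrative sketch)::
status = yield spawner.poll()
if status is None:
    pass  # still running; a zero exit code would be falsy but not None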
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
@run_on_executor
def asynchronize(self, method, *args, **kwargs):
return method(*args, **kwargs)
@property
def events(self):
"""Filter event-reflector to just this pods events
Returns list of all events that match our pod_name
since our ._last_event (if defined).
._last_event is set at the beginning of .start().
"""
if not self.event_reflector:
return []
events = []
for event in self.event_reflector.events:
if event.involved_object.name != self.pod_name:
# only consider events for my pod name
continue
if self._last_event and event.metadata.uid == self._last_event:
# saw last_event marker, ignore any previous events
# and only consider future events
# only include events *after* our _last_event marker
events = []
else:
events.append(event)
return events
@async_generator
async def progress(self):
"""
This function reports back the progress of spawning a pod until
self._start_future has fired.
It works with events parsed by the Python Kubernetes client; the
specification of the events relevant here is:
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#event-v1-core
"""
if not self.events_enabled:
return
self.log.debug('progress generator: %s', self.pod_name)
start_future = self._start_future
pod_id = None
progress = 0
next_event = 0
break_while_loop = False
while True:
# Ensure we do one final pass over the events even after the
# start_future signal has fired.
if start_future.done():
break_while_loop = True
events = self.events
len_events = len(events)
if next_event < len_events:
# only show messages for the 'current' pod
# pod_id may change if a previous pod is being stopped
# before starting a new one
# use the uid of the latest event to identify 'current'
pod_id = events[-1].involved_object.uid
for i in range(next_event, len_events):
event = events[i]
# move the progress bar.
# Since we don't know how many events we will get,
# asymptotically approach 90% completion with each event.
# each event gets 33% closer to 90%:
# 30 50 63 72 78 82 84 86 87 88 88 89
progress += (90 - progress) / 3
# V1Event isn't serializable, and neither are the datetime
# objects within it, and we need what we pass back to be
# serializable so it can be sent back from JupyterHub to
# a browser wanting to display progress.
serializable_event = json.loads(
json.dumps(event.to_dict(), default=datetime.isoformat)
)
await yield_({
'progress': int(progress),
'raw_event': serializable_event,
'message': "%s [%s] %s" % (
event.last_timestamp or event.event_time,
event.type,
event.message,
)
})
next_event = len_events
if break_while_loop:
break
await sleep(1)
def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs):
"""Start a shared reflector on the KubeSpawner class
key: key for the reflector (e.g. 'pods' or 'events')
ReflectorClass: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
main_loop = IOLoop.current()
def on_reflector_failure():
self.log.critical(
"%s reflector failed, halting Hub.",
key.title(),
)
sys.exit(1)
previous_reflector = self.__class__.reflectors.get(key)
if replace or not previous_reflector:
self.__class__.reflectors[key] = ReflectorClass(
parent=self,
namespace=self.namespace,
on_failure=on_reflector_failure,
**kwargs,
)
if replace and previous_reflector:
# we replaced the reflector, stop the old one
previous_reflector.stop()
# return the current reflector
return self.__class__.reflectors[key]
def _start_watching_events(self, replace=False):
"""Start the events reflector
If replace=False and the event reflector is already running,
do nothing.
If replace=True, a running event reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector(
"events",
EventReflector,
fields={"involvedObject.kind": "Pod"},
replace=replace,
)
def _start_watching_pods(self, replace=False):
"""Start the pod reflector
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector("pods", PodReflector, replace=replace)
# record a future for the call to .start()
# so we can use it to terminate .progress()
def start(self):
"""Thin wrapper around self._start
so we can hold onto a reference for the Future
start returns, which we can use to terminate
.progress()
"""
self._start_future = self._start()
return self._start_future
_last_event = None
@gen.coroutine
def _start(self):
"""Start the user's pod"""
# load user options (including profile)
yield self.load_user_options()
# record latest event so we don't include old
# events from previous pods in self.events
# track by order and name instead of uid
# so we get events like deletion of a previously stale
# pod if it's part of this spawn process
events = self.events
if events:
self._last_event = events[-1].metadata.uid
if self.storage_pvc_ensure:
# Try and create the pvc. If it succeeds we are good. If
# returns a 409 indicating it already exists we are good. If
# it returns a 403, indicating potential quota issue we need
# to see if pvc already exists before we decide to raise the
# error for quota being exceeded. This is because quota is
# checked before determining if the PVC needed to be
# created.
pvc = self.get_pvc_manifest()
try:
yield self.asynchronize(
self.api.create_namespaced_persistent_volume_claim,
namespace=self.namespace,
body=pvc
)
except ApiException as e:
if e.status == 409:
self.log.info("PVC " + self.pvc_name + " already exists, so did not create new pvc.")
elif e.status == 403:
t, v, tb = sys.exc_info()
try:
yield self.asynchronize(
self.api.read_namespaced_persistent_volume_claim,
name=self.pvc_name,
namespace=self.namespace)
except ApiException as e:
raise v.with_traceback(tb)
self.log.info("PVC " + self.pvc_name + " already exists, possibly have reached quota though.")
else:
raise
# If we run into a 409 Conflict error, it means a pod with the
# same name already exists. We stop it, wait for it to stop, and
# try again. We try 4 times, and if it still fails we give up.
# FIXME: Have better / cleaner retry logic!
retry_times = 4
pod = yield self.get_pod_manifest()
if self.modify_pod_hook:
pod = yield gen.maybe_future(self.modify_pod_hook(self, pod))
for i in range(retry_times):
try:
yield self.asynchronize(
self.api.create_namespaced_pod,
self.namespace,
pod,
)
break
except ApiException as e:
if e.status != 409:
# We only want to handle 409 conflict errors
self.log.exception("Failed for %s", pod.to_str())
raise
self.log.info('Found existing pod %s, attempting to kill', self.pod_name)
# TODO: this should show up in events
yield self.stop(True)
self.log.info('Killed pod %s, will try starting singleuser pod again', self.pod_name)
else:
raise Exception(
'Cannot create user pod: %s already exists and could not be deleted' % self.pod_name)
# we need a timeout here even though start itself has a timeout
# in order for this coroutine to finish at some point.
# using the same start_timeout here
# essentially ensures that this timeout should never propagate up
# because the handler will have stopped waiting after
# start_timeout, starting from a slightly earlier point.
try:
yield exponential_backoff(
lambda: self.is_pod_running(self.pod_reflector.pods.get(self.pod_name, None)),
'pod/%s did not start in %s seconds!' % (self.pod_name, self.start_timeout),
timeout=self.start_timeout,
)
except TimeoutError:
if self.pod_name not in self.pod_reflector.pods:
# if pod never showed up at all,
# restart the pod reflector which may have become disconnected.
self.log.error(
"Pod %s never showed up in reflector, restarting pod reflector",
self.pod_name,
)
self._start_watching_pods(replace=True)
raise
pod = self.pod_reflector.pods[self.pod_name]
self.pod_id = pod.metadata.uid
if self.event_reflector:
self.log.debug(
'pod %s events before launch: %s',
self.pod_name,
"\n".join(
[
"%s [%s] %s" % (event.last_timestamp or event.event_time, event.type, event.message)
for event in self.events
]
),
)
return (pod.status.pod_ip, self.port)
@gen.coroutine
def stop(self, now=False):
delete_options = client.V1DeleteOptions()
if now:
grace_seconds = 0
else:
grace_seconds = self.delete_grace_period
delete_options.grace_period_seconds = grace_seconds
self.log.info("Deleting pod %s", self.pod_name)
try:
yield self.asynchronize(
self.api.delete_namespaced_pod,
name=self.pod_name,
namespace=self.namespace,
body=delete_options,
grace_period_seconds=grace_seconds,
)
except ApiException as e:
if e.status == 404:
self.log.warning(
"No pod %s to delete. Assuming already deleted.",
self.pod_name,
)
else:
raise
try:
yield exponential_backoff(
lambda: self.pod_reflector.pods.get(self.pod_name, None) is None,
'pod/%s did not disappear in %s seconds!' % (self.pod_name, self.start_timeout),
timeout=self.start_timeout,
)
except TimeoutError:
self.log.error("Pod %s did not disappear, restarting pod reflector", self.pod_name)
self._start_watching_pods(replace=True)
raise
@default('env_keep')
def _env_keep_default(self):
return []
_profile_list = None
def _render_options_form(self, profile_list):
self._profile_list = self._init_profile_list(profile_list)
profile_form_template = Environment(loader=BaseLoader).from_string(self.profile_form_template)
return profile_form_template.render(profile_list=self._profile_list)
@gen.coroutine
def _render_options_form_dynamically(self, current_spawner):
profile_list = yield gen.maybe_future(self.profile_list(current_spawner))
profile_list = self._init_profile_list(profile_list)
return self._render_options_form(profile_list)
@default('options_form')
def _options_form_default(self):
'''
Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined.
'''
if not self.profile_list:
return ''
if callable(self.profile_list):
return self._render_options_form_dynamically
else:
return self._render_options_form(self.profile_list)
def options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict;
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile', [None])[0]
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "cpus-8"}``
"""
return {
'profile': formdata.get('profile', [None])[0]
}
@gen.coroutine
def _load_profile(self, slug):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['slug'] == slug:
break
else:
if slug:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
slug, ', '.join(p['slug'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v)
# set of recognised user option keys
# used for warning about ignoring unrecognised options
_user_option_keys = {'profile',}
def _init_profile_list(self, profile_list):
# generate missing slug fields from display_name
for profile in profile_list:
if 'slug' not in profile:
profile['slug'] = slugify(profile['display_name'])
return profile_list
@gen.coroutine
def load_user_options(self):
"""Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options.
"""
if self._profile_list is None:
if callable(self.profile_list):
profile_list = yield gen.maybe_future(self.profile_list(self))
else:
profile_list = self.profile_list
self._profile_list = self._init_profile_list(profile_list)
selected_profile = self.user_options.get('profile', None)
if self._profile_list:
yield self._load_profile(selected_profile)
elif selected_profile:
self.log.warning("Profile %r requested, but profiles are not enabled", selected_profile)
# help debugging by logging any option fields that are not recognized
option_keys = set(self.user_options)
unrecognized_keys = option_keys.difference(self._user_option_keys)
if unrecognized_keys:
self.log.warning(
"Ignoring unrecognized KubeSpawner user_options: %s",
", ".join(
map(
str,
sorted(unrecognized_keys)
)
)
)
./CrossVul/dataset_final_sorted/CWE-863/py/good_4094_0
crossvul-python_data_good_4544_0
from django.core.exceptions import PermissionDenied
import qrcode
import qrcode.image.svg
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import SuccessURLAllowedHostsMixin
from django.http import HttpResponse
from django.shortcuts import resolve_url
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import (
DeleteView, FormView, ListView, UpdateView, View)
from django_otp import login as otp_login
from django_otp.plugins.otp_totp.models import TOTPDevice
from wagtail_2fa import forms, utils
from wagtail_2fa.mixins import OtpRequiredMixin
class LoginView(SuccessURLAllowedHostsMixin, FormView):
template_name = "wagtail_2fa/otp_form.html"
form_class = forms.TokenForm
redirect_field_name = REDIRECT_FIELD_NAME
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context[self.redirect_field_name] = self.get_redirect_url()
return context
def form_valid(self, form):
otp_login(self.request, self.request.user.otp_device)
return super().form_valid(form)
def get_redirect_url(self):
"""Return the user-originating redirect URL if it's safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name, self.request.GET.get(self.redirect_field_name, "")
)
url_is_safe = is_safe_url(
url=redirect_to,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
return redirect_to if url_is_safe else ""
def get_success_url(self):
url = self.get_redirect_url()
return url or resolve_url(settings.LOGIN_REDIRECT_URL)
class DeviceListView(OtpRequiredMixin, ListView):
template_name = "wagtail_2fa/device_list.html"
# require OTP if configured
if_configured = True
def get_queryset(self):
return TOTPDevice.objects.devices_for_user(self.kwargs['user_id'], confirmed=True)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['user_id'] = int(self.kwargs['user_id'])
return context
def dispatch(self, request, *args, **kwargs):
if (int(self.kwargs["user_id"]) == request.user.pk or
request.user.has_perm("user.change_user")):
if not self.user_allowed(request.user):
return self.handle_no_permission(request)
return super(OtpRequiredMixin, self).dispatch(request, *args, **kwargs)
raise PermissionDenied
class DeviceCreateView(OtpRequiredMixin, FormView):
form_class = forms.DeviceForm
template_name = "wagtail_2fa/device_form.html"
# require OTP if configured
if_configured = True
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["request"] = self.request
kwargs["instance"] = self.device
return kwargs
def form_valid(self, form):
form.save()
utils.delete_unconfirmed_devices(self.request.user)
if not self.request.user.is_verified():
otp_login(self.request, form.instance)
return super().form_valid(form)
def get_success_url(self):
return reverse('wagtail_2fa_device_list', kwargs={'user_id': self.request.user.id})
@cached_property
def device(self):
if self.request.method.lower() == "get":
return utils.new_unconfirmed_device(self.request.user)
else:
return utils.get_unconfirmed_device(self.request.user)
class DeviceUpdateView(OtpRequiredMixin, UpdateView):
form_class = forms.DeviceForm
template_name = "wagtail_2fa/device_form.html"
def get_queryset(self):
return TOTPDevice.objects.devices_for_user(self.request.user, confirmed=True)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def get_success_url(self):
return reverse('wagtail_2fa_device_list', kwargs={'user_id': self.request.user.id})
class DeviceDeleteView(OtpRequiredMixin, DeleteView):
template_name = "wagtail_2fa/device_confirm_delete.html"
def get_queryset(self):
device = TOTPDevice.objects.get(**self.kwargs)
return TOTPDevice.objects.devices_for_user(device.user, confirmed=True)
def get_success_url(self):
return reverse('wagtail_2fa_device_list', kwargs={'user_id': self.request.POST.get('user_id')})
def dispatch(self, request, *args, **kwargs):
device = TOTPDevice.objects.get(**self.kwargs)
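# Only the device's owner, or a user who may manage other users,
# may delete the device; anyone else raises PermissionDenied below.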
if device.user.pk == request.user.pk or request.user.has_perm("user.change_user"):
if not self.user_allowed(request.user):
return self.handle_no_permission(request)
return super(OtpRequiredMixin, self).dispatch(request, *args, **kwargs)
raise PermissionDenied
class DeviceQRCodeView(OtpRequiredMixin, View):
# require OTP if configured
if_configured = True
def get(self, request):
device = utils.get_unconfirmed_device(self.request.user)
img = qrcode.make(device.config_url, image_factory=qrcode.image.svg.SvgImage)
response = HttpResponse(content_type="image/svg+xml")
img.save(response)
return response
./CrossVul/dataset_final_sorted/CWE-863/py/good_4544_0
crossvul-python_data_good_4366_2
"""CILogon OAuthAuthenticator for JupyterHub
Uses OAuth 2.0 with cilogon.org (override with CILOGON_HOST)
Caveats:
- For allowed-user-list / admin purposes, the username will be the ePPN by default.
This is typically an email address and may not work as a Unix userid.
Normalization may be required to turn the JupyterHub username into a Unix username.
- Default username_claim of ePPN does not work for all providers,
e.g. generic OAuth such as Google.
Use `c.CILogonOAuthenticator.username_claim = 'email'` to use
email instead of ePPN as the JupyterHub username.
"""
import json
import os
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from traitlets import Unicode, List, Bool, default, validate, observe
from jupyterhub.auth import LocalAuthenticator
from .oauth2 import OAuthLoginHandler, OAuthenticator
class CILogonLoginHandler(OAuthLoginHandler):
"""See http://www.cilogon.org/oidc for general information."""
def authorize_redirect(self, *args, **kwargs):
"""Add idp, skin to redirect params"""
extra_params = kwargs.setdefault('extra_params', {})
if self.authenticator.idp:
extra_params["selected_idp"] = self.authenticator.idp
if self.authenticator.skin:
extra_params["skin"] = self.authenticator.skin
return super().authorize_redirect(*args, **kwargs)
class CILogonOAuthenticator(OAuthenticator):
_deprecated_oauth_aliases = {
"idp_whitelist": ("allowed_idps", "0.12.0"),
**OAuthenticator._deprecated_oauth_aliases,
}
login_service = "CILogon"
client_id_env = 'CILOGON_CLIENT_ID'
client_secret_env = 'CILOGON_CLIENT_SECRET'
login_handler = CILogonLoginHandler
cilogon_host = Unicode(os.environ.get("CILOGON_HOST") or "cilogon.org", config=True)
@default("authorize_url")
def _authorize_url_default(self):
return "https://%s/authorize" % self.cilogon_host
@default("token_url")
def _token_url(self):
return "https://%s/oauth2/token" % self.cilogon_host
scope = List(
Unicode(),
default_value=['openid', 'email', 'org.cilogon.userinfo'],
config=True,
help="""The OAuth scopes to request.
See cilogon_scope.md for details.
At least 'openid' is required.
""",
)
@validate('scope')
def _validate_scope(self, proposal):
"""ensure openid is requested"""
if 'openid' not in proposal.value:
return ['openid'] + proposal.value
return proposal.value
idp_whitelist = List(help="Deprecated, use `CILogonOAuthenticator.allowed_idps`", config=True,)
allowed_idps = List(
config=True,
help="""A list of IDP which can be stripped from the username after the @ sign.""",
)
strip_idp_domain = Bool(
False,
config=True,
help="""Remove the IDP domain from the username. Note that only domains which
appear in the `allowed_idps` will be stripped.""",
)
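# For example (domain illustrative), to allow logins only from a single
# university IdP and strip its domain from usernames:
#
#   c.CILogonOAuthenticator.allowed_idps = ['university.edu']
#   c.CILogonOAuthenticator.strip_idp_domain = True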
idp = Unicode(
config=True,
help="""The `idp` attribute is the SAML Entity ID of the user's selected
identity provider.
See https://cilogon.org/include/idplist.xml for the list of identity
providers supported by CILogon.
""",
)
skin = Unicode(
config=True,
help="""The `skin` attribute is the name of the custom CILogon interface skin
for your application.
Contact help@cilogon.org to request a custom skin.
""",
)
username_claim = Unicode(
"eppn",
config=True,
help="""The claim in the userinfo response from which to get the JupyterHub username
Examples include: eppn, email
What keys are available will depend on the scopes requested.
See http://www.cilogon.org/oidc for details.
""",
)
additional_username_claims = List(
config=True,
help="""Additional claims to check if the username_claim fails.
This is useful for linked identities where not all of them return
the primary username_claim.
""",
)
async def authenticate(self, handler, data=None):
"""We set up auth_state based on additional CILogon info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a CILogon Access Token
# See: http://www.cilogon.org/oidc
headers = {"Accept": "application/json", "User-Agent": "JupyterHub"}
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.oauth_callback_url,
code=code,
grant_type='authorization_code',
)
url = url_concat(self.token_url, params)
req = HTTPRequest(url, headers=headers, method="POST", body='')
resp = await http_client.fetch(req)
token_response = json.loads(resp.body.decode('utf8', 'replace'))
access_token = token_response['access_token']
self.log.info("Access token acquired.")
# Determine who the logged in user is
params = dict(access_token=access_token)
req = HTTPRequest(
url_concat("https://%s/oauth2/userinfo" % self.cilogon_host, params),
headers=headers,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
claimlist = [self.username_claim]
if self.additional_username_claims:
claimlist.extend(self.additional_username_claims)
for claim in claimlist:
username = resp_json.get(claim)
if username:
break
if not username:
if len(claimlist) < 2:
self.log.error(
"Username claim %s not found in response: %s",
self.username_claim,
sorted(resp_json.keys()),
)
else:
self.log.error(
"No username claim from %r in response: %s",
claimlist,
sorted(resp_json.keys()),
)
raise web.HTTPError(500, "Failed to get username from CILogon")
if self.allowed_idps:
gotten_name, gotten_idp = username.split('@')
if gotten_idp not in self.allowed_idps:
self.log.error(
"Attempted login from a domain that is not allowed: %s", gotten_idp
)
raise web.HTTPError(500, "Login attempted from a domain that is not allowed")
if len(self.allowed_idps) == 1 and self.strip_idp_domain:
username = gotten_name
userdict = {"name": username}
# Now we set up auth_state
userdict["auth_state"] = auth_state = {}
# Save the token response and full CILogon reply in auth state
# These can be used for user provisioning
# in the Lab/Notebook environment.
auth_state['token_response'] = token_response
# store the whole user model in auth_state.cilogon_user
# keep access_token as well, in case anyone was relying on it
auth_state['access_token'] = access_token
auth_state['cilogon_user'] = resp_json
return userdict
class LocalCILogonOAuthenticator(LocalAuthenticator, CILogonOAuthenticator):
"""A version that mixes in local system user creation"""
pass
./CrossVul/dataset_final_sorted/CWE-863/py/good_4366_2
crossvul-python_data_good_4126_1
#!/usr/bin/env python
# Copyright 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
settings.py
<Author>
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
January 11, 2017
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
A central location for TUF configuration settings. Example options include
setting the destination of temporary files and downloaded content, the maximum
length of downloaded metadata (unknown file attributes), and download
behavior.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# Set a directory that should be used for all temporary files. If this
# is None, then the system default will be used. The system default
# will also be used if a directory path set here is invalid or
# unusable.
temporary_directory = None
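# Since these settings are plain module attributes, a client may override
# them at runtime before starting an update cycle, e.g. (path illustrative):
#
# import tuf.settings
# tuf.settings.temporary_directory = '/var/tmp/tuf-downloads'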
# Set a local directory to store metadata that is requested from mirrors. This
# directory contains subdirectories for different repositories, where each
# subdirectory contains a different set of metadata. For example:
# tuf.settings.repositories_directory = '/tmp/repositories'. The root file for a
# repository named 'django_repo' can be found at:
# /tmp/repositories/django_repo/metadata/current/root.METADATA_EXTENSION
repositories_directory = None
# The 'log.py' module manages TUF's logging system. Users have the option to
# enable/disable logging to a file via 'ENABLE_FILE_LOGGING', or
# tuf.log.enable_file_logging() and tuf.log.disable_file_logging().
ENABLE_FILE_LOGGING = False
# If file logging is enabled via 'ENABLE_FILE_LOGGING', TUF log messages will
# be saved to 'LOG_FILENAME'
LOG_FILENAME = 'tuf.log'
# Since the timestamp role does not have signed metadata about itself, we set a
# default but sane upper bound for the number of bytes required to download it.
DEFAULT_TIMESTAMP_REQUIRED_LENGTH = 16384 #bytes
# The Root role may be updated without knowing its version if top-level
# metadata cannot be safely downloaded (e.g., keys may have been revoked, thus
# requiring a new Root file that includes the updated keys). Set a default
# upper bound for the maximum total bytes that may be downloaded for Root
# metadata.
DEFAULT_ROOT_REQUIRED_LENGTH = 512000 #bytes
# Set a default, but sane, upper bound for the number of bytes required to
# download Snapshot metadata.
DEFAULT_SNAPSHOT_REQUIRED_LENGTH = 2000000 #bytes
# Set a default, but sane, upper bound for the number of bytes required to
# download Targets metadata.
DEFAULT_TARGETS_REQUIRED_LENGTH = 5000000 #bytes
# Set a timeout value in seconds (float) for non-blocking socket operations.
SOCKET_TIMEOUT = 4 #seconds
# The maximum chunk of data, in bytes, we would download in every round.
CHUNK_SIZE = 400000 #bytes
# The minimum average download speed (bytes/second) that must be met to
# avoid being considered as a slow retrieval attack.
MIN_AVERAGE_DOWNLOAD_SPEED = 50 #bytes/second
# By default, limit the number of delegatees we visit for any target.
MAX_NUMBER_OF_DELEGATIONS = 2**5
# This configuration is for indicating how consistent files should be created.
# There are two options: "copy" and "hard_link". For "copy", the consistent
# file will be a copy of root.json. This approach will require the most disk
# space out of the two options. For "hard_link", the latest root.json will be
# a hard link to 2.root.json (for example). This approach is more efficient in
# terms of disk space usage. By default, we use 'copy'.
CONSISTENT_METHOD = 'copy'
# A setting for the instances where a default hashing algorithm is needed.
# This setting is currently used to calculate the path hash prefixes of hashed
# bin delegations, and digests of targets filepaths. The other instances
# (e.g., digest of files) that require a hashing algorithm rely on settings in
# the securesystemslib external library.
DEFAULT_HASH_ALGORITHM = 'sha256'
# The client's update procedure (contained within a while-loop) can potentially
# hog the CPU. The following setting can be used to force the update sequence
# to suspend execution for a specified amount of time. See
# theupdateframework/tuf/issue#338.
SLEEP_BEFORE_ROUND = None
# Maximum number of root metadata file rotations we should perform in order to
# prevent a denial-of-service (DoS) attack.
MAX_NUMBER_ROOT_ROTATIONS = 2**5
./CrossVul/dataset_final_sorted/CWE-863/py/good_4126_1
crossvul-python_data_good_2493_1
from __future__ import absolute_import
from typing import Any, Optional, Tuple, List, Set, Iterable, Mapping, Callable, Dict
from django.utils.translation import ugettext as _
from django.conf import settings
from django.db import transaction
from django.http import HttpRequest, HttpResponse
from zerver.lib.request import JsonableError, REQ, has_request_variables
from zerver.decorator import authenticated_json_post_view, \
authenticated_json_view, \
get_user_profile_by_email, require_realm_admin, to_non_negative_int
from zerver.lib.actions import bulk_remove_subscriptions, \
do_change_subscription_property, internal_prep_message, \
create_streams_if_needed, gather_subscriptions, subscribed_to_stream, \
bulk_add_subscriptions, do_send_messages, get_subscriber_emails, do_rename_stream, \
do_deactivate_stream, do_make_stream_public, do_add_default_stream, \
do_change_stream_description, do_get_streams, do_make_stream_private, \
do_remove_default_stream, get_topic_history_for_stream
from zerver.lib.response import json_success, json_error, json_response
from zerver.lib.validator import check_string, check_list, check_dict, \
check_bool, check_variable_type
from zerver.models import UserProfile, Stream, Realm, Subscription, \
Recipient, get_recipient, get_stream, bulk_get_streams, \
bulk_get_recipients, valid_stream_name, get_active_user_dicts_in_realm
from collections import defaultdict
import ujson
from six.moves import urllib
import six
from typing import Text
def is_active_subscriber(user_profile, recipient):
# type: (UserProfile, Recipient) -> bool
return Subscription.objects.filter(user_profile=user_profile,
recipient=recipient,
active=True).exists()
def list_to_streams(streams_raw, user_profile, autocreate=False):
# type: (Iterable[Mapping[str, Any]], UserProfile, Optional[bool]) -> Tuple[List[Stream], List[Stream]]
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name: that is, that it is shorter
than Stream.MAX_NAME_LENGTH characters and passes
valid_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = set(stream_dict["name"] for stream_dict in streams_raw)
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name (%s) too long.") % (stream_name,))
if not valid_stream_name(stream_name):
raise JsonableError(_("Invalid stream name (%s).") % (stream_name,))
existing_streams = [] # type: List[Stream]
missing_stream_dicts = [] # type: List[Mapping[str, Any]]
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
missing_stream_dicts.append(stream_dict)
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams = [] # type: List[Stream]
else:
# autocreate=True path starts here
if not user_profile.can_create_streams():
raise JsonableError(_('User cannot create streams.'))
elif not autocreate:
raise JsonableError(_("Stream(s) (%s) do not exist") % ", ".join(
stream_dict["name"] for stream_dict in missing_stream_dicts))
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(realm=user_profile.realm,
stream_dicts=missing_stream_dicts)
existing_streams += dup_streams
return existing_streams, created_streams
class PrincipalError(JsonableError):
def __init__(self, principal, status_code=403):
# type: (Text, int) -> None
self.principal = principal # type: Text
self.status_code = status_code # type: int
def to_json_error_msg(self):
# type: () -> Text
return ("User not authorized to execute queries on behalf of '%s'"
% (self.principal,))
def principal_to_user_profile(agent, principal):
# type: (UserProfile, Text) -> UserProfile
principal_doesnt_exist = False
try:
principal_user_profile = get_user_profile_by_email(principal)
except UserProfile.DoesNotExist:
principal_doesnt_exist = True
if (principal_doesnt_exist or
agent.realm != principal_user_profile.realm):
# We have to make sure we don't leak information about which users
# are registered for Zulip in a different realm. We could do
# something a little more clever and check the domain part of the
# principal to maybe give a better error message
raise PrincipalError(principal)
return principal_user_profile
@require_realm_admin
def deactivate_stream_backend(request, user_profile, stream_id):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
target = get_and_validate_stream_by_id(stream_id, user_profile.realm)
if target.invite_only and not subscribed_to_stream(user_profile, target):
return json_error(_('Cannot administer invite-only streams this way'))
do_deactivate_stream(target)
return json_success()
@require_realm_admin
@has_request_variables
def add_default_stream(request, user_profile, stream_name=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
do_add_default_stream(user_profile.realm, stream_name)
return json_success()
@require_realm_admin
@has_request_variables
def remove_default_stream(request, user_profile, stream_name=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
do_remove_default_stream(user_profile.realm, stream_name)
return json_success()
@require_realm_admin
@has_request_variables
def update_stream_backend(request, user_profile, stream_id,
description=REQ(validator=check_string, default=None),
is_private=REQ(validator=check_bool, default=None),
new_name=REQ(validator=check_string, default=None)):
# type: (HttpRequest, UserProfile, int, Optional[Text], Optional[bool], Optional[Text]) -> HttpResponse
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
stream_name = stream.name
if description is not None:
do_change_stream_description(user_profile.realm, stream_name, description)
if stream_name is not None and new_name is not None:
do_rename_stream(user_profile.realm, stream_name, new_name)
if is_private is not None:
if is_private:
do_make_stream_private(user_profile.realm, stream_name)
else:
do_make_stream_public(user_profile, user_profile.realm, stream_name)
return json_success()
def list_subscriptions_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({"subscriptions": gather_subscriptions(user_profile)[0]})
FuncKwargPair = Tuple[Callable[..., HttpResponse], Dict[str, Iterable[Any]]]
@has_request_variables
def update_subscriptions_backend(request, user_profile,
delete=REQ(validator=check_list(check_string), default=[]),
add=REQ(validator=check_list(check_dict([('name', check_string)])), default=[])):
# type: (HttpRequest, UserProfile, Iterable[Text], Iterable[Mapping[str, Any]]) -> HttpResponse
if not add and not delete:
return json_error(_('Nothing to do. Specify at least one of "add" or "delete".'))
method_kwarg_pairs = [
(add_subscriptions_backend, dict(streams_raw=add)),
(remove_subscriptions_backend, dict(streams_raw=delete))
] # type: List[FuncKwargPair]
return compose_views(request, user_profile, method_kwarg_pairs)
def compose_views(request, user_profile, method_kwarg_pairs):
# type: (HttpRequest, UserProfile, List[FuncKwargPair]) -> HttpResponse
'''
This takes a series of view methods from method_kwarg_pairs and calls
them in sequence, and it smushes all the json results into a single
response when everything goes right. (This helps clients avoid extra
latency hops.) It rolls back the transaction when things go wrong in
any one of the composed methods.
    TODO: Move this to a utils-like module if we end up using it more widely.
'''
json_dict = {} # type: Dict[str, Any]
with transaction.atomic():
for method, kwargs in method_kwarg_pairs:
response = method(request, user_profile, **kwargs)
if response.status_code != 200:
raise JsonableError(response.content)
json_dict.update(ujson.loads(response.content))
return json_success(json_dict)
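# Hedged sketch (hypothetical helper, never called): composing the two real
# backends the way update_subscriptions_backend does above. The stream names
# are made up.
def _compose_views_example(request, user_profile):
    # type: (HttpRequest, UserProfile) -> HttpResponse
    pairs = [
        (add_subscriptions_backend, dict(streams_raw=[{'name': 'devel'}])),
        (remove_subscriptions_backend, dict(streams_raw=['old-stream'])),
    ]  # type: List[FuncKwargPair]
    # Any non-200 response from either backend raises JsonableError and rolls
    # back the shared transaction; otherwise the JSON bodies are merged into
    # a single success response.
    return compose_views(request, user_profile, pairs)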
@authenticated_json_post_view
def json_remove_subscriptions(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return remove_subscriptions_backend(request, user_profile)
@has_request_variables
def remove_subscriptions_backend(request, user_profile,
streams_raw = REQ("subscriptions", validator=check_list(check_string)),
principals = REQ(validator=check_list(check_string), default=None)):
# type: (HttpRequest, UserProfile, Iterable[Text], Optional[Iterable[Text]]) -> HttpResponse
removing_someone_else = principals and \
set(principals) != set((user_profile.email,))
if removing_someone_else and not user_profile.is_realm_admin:
# You can only unsubscribe other people from a stream if you are a realm
# admin.
return json_error(_("This action requires administrative rights"))
streams_as_dict = []
for stream_name in streams_raw:
streams_as_dict.append({"name": stream_name.strip()})
streams, __ = list_to_streams(streams_as_dict, user_profile)
for stream in streams:
if removing_someone_else and stream.invite_only and \
not subscribed_to_stream(user_profile, stream):
# Even as an admin, you can't remove other people from an
# invite-only stream you're not on.
return json_error(_("Cannot administer invite-only streams this way"))
if principals:
people_to_unsub = set(principal_to_user_profile(
user_profile, principal) for principal in principals)
else:
people_to_unsub = set([user_profile])
result = dict(removed=[], not_subscribed=[]) # type: Dict[str, List[Text]]
(removed, not_subscribed) = bulk_remove_subscriptions(people_to_unsub, streams)
for (subscriber, stream) in removed:
result["removed"].append(stream.name)
for (subscriber, stream) in not_subscribed:
result["not_subscribed"].append(stream.name)
return json_success(result)
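# Illustrative sketch of the success payload built above (stream names are
# made up): streams the principals were actually removed from, versus streams
# they were not subscribed to in the first place.
#
#     {"removed": ["devel", "social"], "not_subscribed": ["design"]}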
def filter_stream_authorization(user_profile, streams):
# type: (UserProfile, Iterable[Stream]) -> Tuple[List[Stream], List[Stream]]
streams_subscribed = set() # type: Set[int]
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])
subs = Subscription.objects.filter(user_profile=user_profile,
recipient__in=list(recipients_map.values()),
active=True)
for sub in subs:
streams_subscribed.add(sub.recipient.type_id)
unauthorized_streams = [] # type: List[Stream]
for stream in streams:
        # The user is authorized for their own streams
if stream.id in streams_subscribed:
continue
# The user is not authorized for invite_only streams
if stream.invite_only:
unauthorized_streams.append(stream)
authorized_streams = [stream for stream in streams if
stream.id not in set(stream.id for stream in unauthorized_streams)]
return authorized_streams, unauthorized_streams
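# Hedged sketch (hypothetical helper, never called): the per-stream
# authorization rule implemented above, in one place. A stream is authorized
# if it is public or the user is already subscribed to it.
def _is_stream_authorized_example(stream, subscribed_stream_ids):
    # type: (Stream, Set[int]) -> bool
    return (not stream.invite_only) or (stream.id in subscribed_stream_ids)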
@has_request_variables
def add_subscriptions_backend(request, user_profile,
streams_raw = REQ("subscriptions",
validator=check_list(check_dict([('name', check_string)]))),
invite_only = REQ(validator=check_bool, default=False),
announce = REQ(validator=check_bool, default=False),
principals = REQ(validator=check_list(check_string), default=None),
authorization_errors_fatal = REQ(validator=check_bool, default=True)):
# type: (HttpRequest, UserProfile, Iterable[Mapping[str, Text]], bool, bool, Optional[List[Text]], bool) -> HttpResponse
stream_dicts = []
for stream_dict in streams_raw:
stream_dict_copy = {} # type: Dict[str, Any]
for field in stream_dict:
stream_dict_copy[field] = stream_dict[field]
# Strip the stream name here.
stream_dict_copy['name'] = stream_dict_copy['name'].strip()
stream_dict_copy["invite_only"] = invite_only
stream_dicts.append(stream_dict_copy)
# Validation of the streams arguments, including enforcement of
# can_create_streams policy and valid_stream_name policy is inside
# list_to_streams.
existing_streams, created_streams = \
list_to_streams(stream_dicts, user_profile, autocreate=True)
authorized_streams, unauthorized_streams = \
filter_stream_authorization(user_profile, existing_streams)
if len(unauthorized_streams) > 0 and authorization_errors_fatal:
return json_error(_("Unable to access stream (%s).") % unauthorized_streams[0].name)
# Newly created streams are also authorized for the creator
streams = authorized_streams + created_streams
if principals is not None:
if user_profile.realm.is_zephyr_mirror_realm and not all(stream.invite_only for stream in streams):
return json_error(_("You can only invite other Zephyr mirroring users to invite-only streams."))
subscribers = set(principal_to_user_profile(user_profile, principal) for principal in principals)
else:
subscribers = set([user_profile])
(subscribed, already_subscribed) = bulk_add_subscriptions(streams, subscribers)
result = dict(subscribed=defaultdict(list), already_subscribed=defaultdict(list)) # type: Dict[str, Any]
for (subscriber, stream) in subscribed:
result["subscribed"][subscriber.email].append(stream.name)
for (subscriber, stream) in already_subscribed:
result["already_subscribed"][subscriber.email].append(stream.name)
private_streams = dict((stream.name, stream.invite_only) for stream in streams)
bots = dict((subscriber.email, subscriber.is_bot) for subscriber in subscribers)
# Inform the user if someone else subscribed them to stuff,
# or if a new stream was created with the "announce" option.
notifications = []
if principals and result["subscribed"]:
for email, subscriptions in six.iteritems(result["subscribed"]):
if email == user_profile.email:
# Don't send a Zulip if you invited yourself.
continue
if bots[email]:
# Don't send invitation Zulips to bots
continue
if len(subscriptions) == 1:
msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the%s stream #**%s**."
% (user_profile.full_name,
" **invite-only**" if private_streams[subscriptions[0]] else "",
subscriptions[0],
))
else:
msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the following streams: \n\n"
% (user_profile.full_name,))
for stream in subscriptions:
msg += "* #**%s**%s\n" % (
stream,
" (**invite-only**)" if private_streams[stream] else "")
if len([s for s in subscriptions if not private_streams[s]]) > 0:
msg += "\nYou can see historical content on a non-invite-only stream by narrowing to it."
notifications.append(internal_prep_message(
user_profile.realm, settings.NOTIFICATION_BOT,
"private", email, "", msg))
if announce and len(created_streams) > 0:
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream is not None:
if len(created_streams) > 1:
stream_msg = "the following streams: %s" % (", ".join('#**%s**' % s.name for s in created_streams))
else:
stream_msg = "a new stream #**%s**." % created_streams[0].name
msg = ("%s just created %s" % (user_profile.full_name, stream_msg))
notifications.append(
internal_prep_message(user_profile.realm, settings.NOTIFICATION_BOT,
"stream",
notifications_stream.name, "Streams", msg))
else:
msg = ("Hi there! %s just created a new stream #**%s**."
% (user_profile.full_name, created_streams[0].name))
for realm_user_dict in get_active_user_dicts_in_realm(user_profile.realm):
# Don't announce to yourself or to people you explicitly added
# (who will get the notification above instead).
                if (principals and realm_user_dict['email'] in principals) or realm_user_dict['email'] == user_profile.email:
continue
notifications.append(internal_prep_message(
user_profile.realm, settings.NOTIFICATION_BOT,
"private",
realm_user_dict['email'], "", msg))
if len(notifications) > 0:
do_send_messages(notifications)
result["subscribed"] = dict(result["subscribed"])
result["already_subscribed"] = dict(result["already_subscribed"])
if not authorization_errors_fatal:
result["unauthorized"] = [stream.name for stream in unauthorized_streams]
return json_success(result)
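# Illustrative sketch of the success payload built above (emails and stream
# names are made up); the "unauthorized" key is only present when
# authorization_errors_fatal is False.
#
#     {"subscribed": {"iago@zulip.com": ["devel"]},
#      "already_subscribed": {"iago@zulip.com": ["social"]},
#      "unauthorized": ["secret-stream"]}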
@has_request_variables
def get_subscribers_backend(request, user_profile,
stream_id=REQ('stream', converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
subscribers = get_subscriber_emails(stream, user_profile)
return json_success({'subscribers': subscribers})
# By default, lists all streams that the user has access to --
# i.e. public streams plus invite-only streams that the user is on
@has_request_variables
def get_streams_backend(request, user_profile,
include_public=REQ(validator=check_bool, default=True),
include_subscribed=REQ(validator=check_bool, default=True),
include_all_active=REQ(validator=check_bool, default=False),
include_default=REQ(validator=check_bool, default=False)):
# type: (HttpRequest, UserProfile, bool, bool, bool, bool) -> HttpResponse
streams = do_get_streams(user_profile, include_public=include_public,
include_subscribed=include_subscribed,
include_all_active=include_all_active,
include_default=include_default)
return json_success({"streams": streams})
@has_request_variables
def get_topics_backend(request, user_profile,
stream_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
if stream.realm_id != user_profile.realm_id:
return json_error(_("Invalid stream id"))
recipient = get_recipient(Recipient.STREAM, stream.id)
if not stream.is_public():
if not is_active_subscriber(user_profile=user_profile,
recipient=recipient):
return json_error(_("Invalid stream id"))
result = get_topic_history_for_stream(
user_profile=user_profile,
recipient=recipient,
)
# Our data structure here is a list of tuples of
# (topic name, unread count), and it's reverse chronological,
# so the most recent topic is the first element of the list.
return json_success(dict(topics=result))
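# Illustrative sketch of the response shape described in the comment above
# (topic names and counts are made up; tuples serialize as JSON arrays):
#
#     {"topics": [["design", 3], ["lunch", 0]]}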
@authenticated_json_post_view
@has_request_variables
def json_stream_exists(request, user_profile, stream=REQ(),
autosubscribe=REQ(default=False)):
# type: (HttpRequest, UserProfile, Text, bool) -> HttpResponse
if not valid_stream_name(stream):
return json_error(_("Invalid characters in stream name"))
try:
stream_id = Stream.objects.get(realm=user_profile.realm, name=stream).id
except Stream.DoesNotExist:
stream_id = None
return stream_exists_backend(request, user_profile, stream_id, autosubscribe)
def stream_exists_backend(request, user_profile, stream_id, autosubscribe):
# type: (HttpRequest, UserProfile, int, bool) -> HttpResponse
try:
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
except JsonableError:
stream = None
result = {"exists": bool(stream)}
if stream is not None:
recipient = get_recipient(Recipient.STREAM, stream.id)
if not stream.invite_only and autosubscribe:
bulk_add_subscriptions([stream], [user_profile])
result["subscribed"] = is_active_subscriber(
user_profile=user_profile,
recipient=recipient)
return json_success(result) # results are ignored for HEAD requests
return json_response(data=result, status=404)
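# Illustrative sketch of the responses above (values made up): an existing
# stream yields a 200 success payload containing {"exists": true} plus a
# "subscribed" flag, while a missing stream yields a 404 whose data is
# {"exists": false}.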
def get_and_validate_stream_by_id(stream_id, realm):
# type: (int, Realm) -> Stream
try:
stream = Stream.objects.get(pk=stream_id, realm_id=realm.id)
except Stream.DoesNotExist:
raise JsonableError(_("Invalid stream id"))
return stream
@has_request_variables
def json_get_stream_id(request, user_profile, stream=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
stream_id = Stream.objects.get(realm=user_profile.realm, name=stream).id
except Stream.DoesNotExist:
return json_error(_("No such stream name"))
return json_success({'stream_id': stream_id})
def get_subscription_or_die(stream_name, user_profile):
# type: (Text, UserProfile) -> Subscription
stream = get_stream(stream_name, user_profile.realm)
if not stream:
raise JsonableError(_("Invalid stream %s") % (stream_name,))
recipient = get_recipient(Recipient.STREAM, stream.id)
subscription = Subscription.objects.filter(user_profile=user_profile,
recipient=recipient, active=True)
if not subscription.exists():
raise JsonableError(_("Not subscribed to stream %s") % (stream_name,))
return subscription
@authenticated_json_view
@has_request_variables
def json_subscription_property(request, user_profile, subscription_data=REQ(
validator=check_list(
check_dict([("stream", check_string),
("property", check_string),
("value", check_variable_type(
[check_string, check_bool]))])))):
# type: (HttpRequest, UserProfile, List[Dict[str, Any]]) -> HttpResponse
"""
This is the entry point to changing subscription properties. This
is a bulk endpoint: requestors always provide a subscription_data
list containing dictionaries for each stream of interest.
Requests are of the form:
[{"stream": "devel", "property": "in_home_view", "value": False},
{"stream": "devel", "property": "color", "value": "#c2c2c2"}]
"""
if request.method != "POST":
return json_error(_("Invalid verb"))
property_converters = {"color": check_string, "in_home_view": check_bool,
"desktop_notifications": check_bool,
"audible_notifications": check_bool,
"pin_to_top": check_bool}
response_data = []
for change in subscription_data:
stream_name = change["stream"]
property = change["property"]
value = change["value"]
if property not in property_converters:
return json_error(_("Unknown subscription property: %s") % (property,))
sub = get_subscription_or_die(stream_name, user_profile)[0]
property_conversion = property_converters[property](property, value)
if property_conversion:
return json_error(property_conversion)
do_change_subscription_property(user_profile, sub, stream_name,
property, value)
response_data.append({'stream': stream_name,
'property': property,
'value': value})
return json_success({"subscription_data": response_data})
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_2493_1 |
crossvul-python_data_good_4126_0 | #!/usr/bin/env python
# Copyright 2012 - 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
updater.py
<Author>
Geremy Condra
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
July 2012. Based on a previous version of this module. (VLAD)
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
'updater.py' is intended to be the only TUF module that software update
systems need to utilize. It provides a single class representing an
updater that includes methods to download, install, and verify
metadata/target files in a secure manner. Importing 'updater.py' and
instantiating its main class is all that is required by the client prior
to a TUF update request. The importation and instantiation steps allow
TUF to load all of the required metadata files and set the repository mirror
information.
An overview of the update process:
1. The software update system instructs TUF to check for updates.
2. TUF downloads and verifies timestamp.json.
3. If timestamp.json indicates that snapshot.json has changed, TUF downloads
and verifies snapshot.json.
4. TUF determines which metadata files listed in snapshot.json differ from
those described in the last snapshot.json that TUF has seen. If root.json
has changed, the update process starts over using the new root.json.
5. TUF provides the software update system with a list of available files
according to targets.json.
6. The software update system instructs TUF to download a specific target
file.
7. TUF downloads and verifies the file and then makes the file available to
the software update system.
<Example Client>
# The client first imports the 'updater.py' module, the only module the
# client is required to import. The client will utilize a single class
# from this module.
import tuf.client.updater
# The only other module the client interacts with is 'tuf.settings'. The
# client accesses this module solely to set the repository directory.
# This directory will hold the files downloaded from a remote repository.
tuf.settings.repositories_directory = 'local-repository'
# Next, the client creates a dictionary object containing the repository
# mirrors. The client may download content from any one of these mirrors.
# In the example below, a single mirror named 'mirror1' is defined. The
# mirror is located at 'http://localhost:8001', and all of the metadata
# and targets files can be found in the 'metadata' and 'targets' directory,
# respectively. If the client wishes to only download target files from
# specific directories on the mirror, the 'confined_target_dirs' field
# should be set. In the example, the client has chosen '', which is
# interpreted as no confinement. In other words, the client can download
# targets from any directory or subdirectories. If the client had chosen
# 'targets1/', they would have been confined to the '/targets/targets1/'
# directory on the 'http://localhost:8001' mirror.
repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}}
# The updater may now be instantiated. The Updater class of 'updater.py'
# is called with two arguments. The first argument assigns a name to this
# particular updater and the second argument the repository mirrors defined
# above.
updater = tuf.client.updater.Updater('updater', repository_mirrors)
# The client next calls the refresh() method to ensure it has the latest
# copies of the metadata files.
updater.refresh()
# get_one_valid_targetinfo() updates role metadata when required. In other
# words, if the client doesn't possess the metadata that lists 'LICENSE.txt',
# get_one_valid_targetinfo() will try to fetch / update it.
target = updater.get_one_valid_targetinfo('LICENSE.txt')
# Determine if 'target' has changed since the client's last refresh(). A
# target is considered updated if it does not exist in
# 'destination_directory' (current directory) or the target located there has
# changed.
destination_directory = '.'
updated_target = updater.updated_targets([target], destination_directory)
for target in updated_target:
updater.download_target(target, destination_directory)
# Client code here may also reference target information (including
# 'custom') by directly accessing the dictionary entries of the target.
# The 'custom' entry is additional file information explicitly set by the
# remote repository.
target_path = target['filepath']
target_length = target['fileinfo']['length']
target_hashes = target['fileinfo']['hashes']
target_custom_data = target['fileinfo']['custom']
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import logging
import os
import shutil
import time
import fnmatch
import copy
import warnings
import tuf
import tuf.download
import tuf.formats
import tuf.settings
import tuf.keydb
import tuf.log
import tuf.mirrors
import tuf.roledb
import tuf.sig
import tuf.exceptions
import securesystemslib.hash
import securesystemslib.keys
import securesystemslib.util
import six
import iso8601
import requests.exceptions
# The Timestamp role does not have signed metadata about it; otherwise we
# would need an infinite regress of metadata. Therefore, we use some
# default, but sane, upper file length for its metadata.
DEFAULT_TIMESTAMP_UPPERLENGTH = tuf.settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH
# The Root role may be updated without knowing its version number if
# top-level metadata cannot be safely downloaded (e.g., keys may have been
# revoked, thus requiring a new Root file that includes the updated keys)
# and 'unsafely_update_root_if_necessary' is True.
# We use some default, but sane, upper file length for its metadata.
DEFAULT_ROOT_UPPERLENGTH = tuf.settings.DEFAULT_ROOT_REQUIRED_LENGTH
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.client.updater')
# Disable 'iso8601' logger messages to prevent 'iso8601' from clogging the
# log file.
iso8601_logger = logging.getLogger('iso8601')
iso8601_logger.disabled = True
class MultiRepoUpdater(object):
"""
<Purpose>
Provide a way for clients to request a target file from multiple
repositories. Which repositories to query is determined by the map
file (i.e,. map.json).
See TAP 4 for more information on the map file and how to request updates
from multiple repositories. TAP 4 describes how users may specify that a
particular threshold of repositories be used for some targets, while a
different threshold of repositories be used for others.
<Arguments>
map_file:
The path of the map file. The map file is needed to determine which
repositories to query given a target file.
<Exceptions>
securesystemslib.exceptions.FormatError, if the map file is improperly
formatted.
tuf.exceptions.Error, if the map file cannot be loaded.
<Side Effects>
None.
<Returns>
None.
"""
def __init__(self, map_file):
# Is 'map_file' a path? If not, raise
# 'securesystemslib.exceptions.FormatError'. The actual content of the map
# file is validated later on in this method.
securesystemslib.formats.PATH_SCHEMA.check_match(map_file)
# A dictionary mapping repositories to TUF updaters.
self.repository_names_to_updaters = {}
try:
# The map file dictionary that associates targets with repositories.
self.map_file = securesystemslib.util.load_json_file(map_file)
except (securesystemslib.exceptions.Error, IOError) as e:
raise tuf.exceptions.Error('Cannot load the map file: ' + str(e))
# Raise securesystemslib.exceptions.FormatError if the map file is
# improperly formatted.
tuf.formats.MAPFILE_SCHEMA.check_match(self.map_file)
# Save the "repositories" entry of the map file, with the following
# example format:
#
# "repositories": {
# "Django": ["https://djangoproject.com/"],
# "PyPI": ["https://pypi.python.org/"]
# }
self.repository_names_to_mirrors = self.map_file['repositories']
def get_valid_targetinfo(self, target_filename, match_custom_field=True):
"""
<Purpose>
Get valid targetinfo, if any, for the given 'target_filename'. The map
file controls the targetinfo returned (see TAP 4). Return a dict of the
form {updater1: targetinfo, updater2: targetinfo, ...}, where the dict
keys are updater objects, and the dict values the matching targetinfo for
'target_filename'.
<Arguments>
target_filename:
The relative path of the target file to update.
match_custom_field:
Boolean that indicates whether the optional custom field in targetinfo
should match across the targetinfo provided by the threshold of
repositories.
<Exceptions>
tuf.exceptions.FormatError, if the argument is improperly formatted.
tuf.exceptions.Error, if the required local metadata directory or the
Root file does not exist.
tuf.exceptions.UnknownTargetError, if the repositories in the map file do
not agree on the target, or none of them have signed for the target.
<Side Effects>
None.
<Returns>
A dict of the form: {updater1: targetinfo, updater2: targetinfo, ...}.
The targetinfo (conformant with tuf.formats.TARGETINFO_SCHEMA) is for
'target_filename'.
"""
# Is the argument properly formatted? If not, raise
# 'tuf.exceptions.FormatError'.
tuf.formats.RELPATH_SCHEMA.check_match(target_filename)
# TAP 4 requires that the following attributes be present in mappings:
# "paths", "repositories", "terminating", and "threshold".
tuf.formats.MAPPING_SCHEMA.check_match(self.map_file['mapping'])
# Set the top-level directory containing the metadata for each repository.
repositories_directory = tuf.settings.repositories_directory
# Verify that the required local directories exist for each repository.
self._verify_metadata_directories(repositories_directory)
# Iterate mappings.
# [{"paths": [], "repositories": [], "terminating": Boolean, "threshold":
# NUM}, ...]
for mapping in self.map_file['mapping']:
logger.debug('Interrogating mappings..' + repr(mapping))
if not self._target_matches_path_pattern(
target_filename, mapping['paths']):
# The mapping is irrelevant to the target file. Try the next one, if
# any.
continue
# The mapping is relevant to the target...
else:
# Do the repositories in the mapping provide a threshold of matching
# targetinfo?
valid_targetinfo = self._matching_targetinfo(target_filename,
mapping, match_custom_field)
if valid_targetinfo:
return valid_targetinfo
else:
# If we are here, it means either (1) the mapping is irrelevant to
# the target, (2) the targets were missing from all repositories in
# this mapping, or (3) the targets on all repositories did not match.
# Whatever the case may be, are we allowed to continue to the next
# mapping? Let's check the terminating entry!
if not mapping['terminating']:
logger.debug('The mapping was irrelevant to the target, and'
' "terminating" was set to False. Trying the next mapping...')
continue
else:
raise tuf.exceptions.UnknownTargetError('The repositories in the'
' mapping do not agree on the target, or none of them have'
' signed for the target, and "terminating" was set to True.')
# If we are here, it means either there were no mappings, or none of the
# mappings provided the target.
logger.debug('Did not find valid targetinfo for ' + repr(target_filename))
raise tuf.exceptions.UnknownTargetError('The repositories in the map'
' file do not agree on the target, or none of them have signed'
' for the target.')
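  # Hedged usage sketch (hypothetical helper, never called): asking for
  # targetinfo that a threshold of repositories agree on. The file names
  # here are made up.
  def _get_valid_targetinfo_example(self):
    multi_updater = MultiRepoUpdater('map.json')
    valid_targetinfo = multi_updater.get_valid_targetinfo('foo.tgz')
    for updater, targetinfo in six.iteritems(valid_targetinfo):
      logger.debug(str(updater) + ' signed for ' + targetinfo['filepath'])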
def _verify_metadata_directories(self, repositories_directory):
# Iterate 'self.repository_names_to_mirrors' and verify that the expected
# local files and directories exist. TAP 4 requires a separate local
# directory for each repository.
for repository_name in self.repository_names_to_mirrors:
logger.debug('Interrogating repository: ' + repr(repository_name))
# Each repository must cache its metadata in a separate location.
repository_directory = os.path.join(repositories_directory,
repository_name)
if not os.path.isdir(repository_directory):
raise tuf.exceptions.Error('The metadata directory'
' for ' + repr(repository_name) + ' must exist'
' at ' + repr(repository_directory))
else:
logger.debug('Found local directory for ' + repr(repository_name))
# The latest known root metadata file must also exist on disk.
root_file = os.path.join(
repository_directory, 'metadata', 'current', 'root.json')
if not os.path.isfile(root_file):
raise tuf.exceptions.Error(
'The Root file must exist at ' + repr(root_file))
else:
logger.debug('Found local Root file at ' + repr(root_file))
def _matching_targetinfo(
self, target_filename, mapping, match_custom_field=True):
valid_targetinfo = {}
# Retrieve the targetinfo from each repository using the underlying
# Updater() instance.
for repository_name in mapping['repositories']:
logger.debug('Retrieving targetinfo for ' + repr(target_filename) +
' from repository...')
try:
targetinfo, updater = self._update_from_repository(
repository_name, target_filename)
except (tuf.exceptions.UnknownTargetError, tuf.exceptions.Error):
continue
valid_targetinfo[updater] = targetinfo
matching_targetinfo = {}
logger.debug('Verifying that a threshold of targetinfo are equal...')
# Iterate 'valid_targetinfo', looking for a threshold number of matches
# for 'targetinfo'. The first targetinfo to reach the required threshold
# is returned. For example, suppose the following list of targetinfo and
# a threshold of 2:
# [A, B, C, B, A, C]
# In this case, targetinfo B is returned.
for valid_updater, compared_targetinfo in six.iteritems(valid_targetinfo):
if not self._targetinfo_match(
targetinfo, compared_targetinfo, match_custom_field):
continue
else:
matching_targetinfo[valid_updater] = targetinfo
if not len(matching_targetinfo) >= mapping['threshold']:
continue
else:
logger.debug('Found a threshold of matching targetinfo!')
# We now have a targetinfo (that matches across a threshold of
# repositories as instructed by the map file), along with the
# updaters that sign for it.
logger.debug(
'Returning updaters for targetinfo: ' + repr(targetinfo))
return matching_targetinfo
return None
def _targetinfo_match(self, targetinfo1, targetinfo2, match_custom_field=True):
if match_custom_field:
return (targetinfo1 == targetinfo2)
else:
targetinfo1_without_custom = copy.deepcopy(targetinfo1)
targetinfo2_without_custom = copy.deepcopy(targetinfo2)
targetinfo1_without_custom['fileinfo'].pop('custom', None)
targetinfo2_without_custom['fileinfo'].pop('custom', None)
return (targetinfo1_without_custom == targetinfo2_without_custom)
def _target_matches_path_pattern(self, target_filename, path_patterns):
for path_pattern in path_patterns:
      logger.debug('Interrogating pattern ' + repr(path_pattern) + ' for'
' target: ' + repr(target_filename))
# Example: "foo.tgz" should match with "/*.tgz". Make sure to strip any
# leading path separators so that a match is made if a repo maintainer
# uses a leading separator with a delegated glob pattern, but a client
# doesn't include one when a target file is requested.
if fnmatch.fnmatch(target_filename.lstrip(os.sep), path_pattern.lstrip(os.sep)):
logger.debug('Found a match for ' + repr(target_filename))
return True
else:
logger.debug('Continue searching for relevant paths.')
continue
# If we are here, then none of the paths are relevant to the target.
logger.debug('None of the paths are relevant.')
return False
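  # Illustrative sketch of the matching above (pattern and target are made
  # up): both sides are stripped of leading separators, so a repository
  # pattern '/*.tgz' still matches a requested target 'foo.tgz'.
  #
  #   fnmatch.fnmatch('foo.tgz'.lstrip(os.sep), '/*.tgz'.lstrip(os.sep))  # True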
def get_updater(self, repository_name):
"""
<Purpose>
Get the updater instance corresponding to 'repository_name'.
<Arguments>
repository_name:
The name of the repository as it appears in the map file. For example,
"Django" and "PyPI" in the "repositories" entry of the map file.
"repositories": {
"Django": ["https://djangoproject.com/"],
"PyPI": ["https://pypi.python.org/"]
}
<Exceptions>
tuf.exceptions.FormatError, if any of the arguments are improperly
formatted.
<Side Effects>
None.
<Returns>
Returns the Updater() instance for 'repository_name'. If the instance
does not exist, return None.
"""
# Are the arguments properly formatted? If not, raise
# 'tuf.exceptions.FormatError'.
tuf.formats.NAME_SCHEMA.check_match(repository_name)
updater = self.repository_names_to_updaters.get(repository_name)
if not updater:
if repository_name not in self.repository_names_to_mirrors:
return None
else:
# Create repository mirrors object needed by the
# tuf.client.updater.Updater(). Each 'repository_name' can have more
# than one mirror.
mirrors = {}
for url in self.repository_names_to_mirrors[repository_name]:
mirrors[url] = {
'url_prefix': url,
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}
try:
# NOTE: State (e.g., keys) should NOT be shared across different
# updater instances.
logger.debug('Adding updater for ' + repr(repository_name))
updater = tuf.client.updater.Updater(repository_name, mirrors)
except Exception:
return None
else:
self.repository_names_to_updaters[repository_name] = updater
else:
logger.debug('Found an updater for ' + repr(repository_name))
# Ensure the updater's metadata is the latest before returning it.
updater.refresh()
return updater
def _update_from_repository(self, repository_name, target_filename):
updater = self.get_updater(repository_name)
if not updater:
raise tuf.exceptions.Error(
'Cannot load updater for ' + repr(repository_name))
else:
# Get one valid target info from the Updater object.
# 'tuf.exceptions.UnknownTargetError' raised by get_one_valid_targetinfo
# if a valid target cannot be found.
return updater.get_one_valid_targetinfo(target_filename), updater
class Updater(object):
"""
<Purpose>
Provide a class that can download target files securely. The updater
keeps track of currently and previously trusted metadata, target files
available to the client, target file attributes such as file size and
hashes, key and role information, metadata signatures, and the ability
to determine when the download of a file should be permitted.
<Updater Attributes>
self.metadata:
Dictionary holding the currently and previously trusted metadata.
Example: {'current': {'root': ROOT_SCHEMA,
'targets':TARGETS_SCHEMA, ...},
'previous': {'root': ROOT_SCHEMA,
'targets':TARGETS_SCHEMA, ...}}
self.metadata_directory:
The directory where trusted metadata is stored.
self.versioninfo:
A cache of version numbers for the roles available on the repository.
Example: {'targets.json': {'version': 128}, ...}
self.mirrors:
The repository mirrors from which metadata and targets are available.
Conformant to 'tuf.formats.MIRRORDICT_SCHEMA'.
self.repository_name:
The name of the updater instance.
<Updater Methods>
refresh():
This method downloads, verifies, and loads metadata for the top-level
roles in a specific order (i.e., timestamp -> snapshot -> root -> targets)
The expiration time for downloaded metadata is also verified.
The metadata for delegated roles are not refreshed by this method, but by
the method that returns targetinfo (i.e., get_one_valid_targetinfo()).
The refresh() method should be called by the client before any target
requests.
get_one_valid_targetinfo(file_path):
Returns the target information for a specific file identified by its file
path. This target method also downloads the metadata of updated targets.
updated_targets(targets, destination_directory):
After the client has retrieved the target information for those targets
they are interested in updating, they would call this method to determine
which targets have changed from those saved locally on disk. All the
      targets that have changed are returned in a list. From this list, they
can request a download by calling 'download_target()'.
download_target(target, destination_directory):
This method performs the actual download of the specified target. The
file is saved to the 'destination_directory' argument.
remove_obsolete_targets(destination_directory):
Any files located in 'destination_directory' that were previously
served by the repository but have since been removed, can be deleted
from disk by the client by calling this method.
Note: The methods listed above are public and intended for the software
updater integrating TUF with this module. All other methods that may begin
with a single leading underscore are non-public and only used internally.
updater.py is not subclassed in TUF, nor is it designed to be subclassed,
  so double leading underscores are not used.
http://www.python.org/dev/peps/pep-0008/#method-names-and-instance-variables
"""
def __init__(self, repository_name, repository_mirrors):
"""
<Purpose>
Constructor. Instantiating an updater object causes all the metadata
files for the top-level roles to be read from disk, including the key and
role information for the delegated targets of 'targets'. The actual
metadata for delegated roles is not loaded in __init__. The metadata for
these delegated roles, including nested delegated roles, are loaded,
updated, and saved to the 'self.metadata' store, as needed, by
get_one_valid_targetinfo().
The initial set of metadata files are provided by the software update
system utilizing TUF.
In order to use an updater, the following directories must already
exist locally:
{tuf.settings.repositories_directory}/{repository_name}/metadata/current
{tuf.settings.repositories_directory}/{repository_name}/metadata/previous
and, at a minimum, the root metadata file must exist:
{tuf.settings.repositories_directory}/{repository_name}/metadata/current/root.json
<Arguments>
repository_name:
The name of the repository.
repository_mirrors:
A dictionary holding repository mirror information, conformant to
'tuf.formats.MIRRORDICT_SCHEMA'. This dictionary holds
information such as the directory containing the metadata and target
files, the server's URL prefix, and the target content directories the
client should be confined to.
repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}}
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
tuf.exceptions.RepositoryError:
If there is an error with the updater's repository files, such
as a missing 'root.json' file.
<Side Effects>
      The metadata files (e.g., 'root.json', 'targets.json') for the top-level
roles are read from disk and stored in dictionaries. In addition, the
key and roledb modules are populated with 'repository_name' entries.
<Returns>
None.
"""
# Do the arguments have the correct format?
# These checks ensure the arguments have the appropriate
# number of objects and object types and that all dict
# keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)
# Save the validated arguments.
self.repository_name = repository_name
self.mirrors = repository_mirrors
# Store the trusted metadata read from disk.
self.metadata = {}
# Store the currently trusted/verified metadata.
self.metadata['current'] = {}
# Store the previously trusted/verified metadata.
self.metadata['previous'] = {}
# Store the version numbers of roles available on the repository. The dict
# keys are paths, and the dict values versioninfo data. This information
# can help determine whether a metadata file has changed and needs to be
# re-downloaded.
self.versioninfo = {}
# Store the file information of the root and snapshot roles. The dict keys
# are paths, the dict values fileinfo data. This information can help
# determine whether a metadata file has changed and so needs to be
# re-downloaded.
self.fileinfo = {}
# Store the location of the client's metadata directory.
self.metadata_directory = {}
# Store the 'consistent_snapshot' of the Root role. This setting
# determines if metadata and target files downloaded from remote
# repositories include the digest.
self.consistent_snapshot = False
# Ensure the repository metadata directory has been set.
if tuf.settings.repositories_directory is None:
raise tuf.exceptions.RepositoryError('The TUF update client'
' module must specify the directory containing the local repository'
' files. "tuf.settings.repositories_directory" MUST be set.')
# Set the path for the current set of metadata files.
repositories_directory = tuf.settings.repositories_directory
repository_directory = os.path.join(repositories_directory, self.repository_name)
current_path = os.path.join(repository_directory, 'metadata', 'current')
# Ensure the current path is valid/exists before saving it.
if not os.path.exists(current_path):
raise tuf.exceptions.RepositoryError('Missing'
' ' + repr(current_path) + '. This path must exist and, at a minimum,'
' contain the Root metadata file.')
self.metadata_directory['current'] = current_path
# Set the path for the previous set of metadata files.
previous_path = os.path.join(repository_directory, 'metadata', 'previous')
# Ensure the previous path is valid/exists.
if not os.path.exists(previous_path):
raise tuf.exceptions.RepositoryError('Missing ' + repr(previous_path) + '.'
' This path MUST exist.')
self.metadata_directory['previous'] = previous_path
# Load current and previous metadata.
for metadata_set in ['current', 'previous']:
for metadata_role in ['root', 'targets', 'snapshot', 'timestamp']:
self._load_metadata_from_file(metadata_set, metadata_role)
# Raise an exception if the repository is missing the required 'root'
# metadata.
if 'root' not in self.metadata['current']:
raise tuf.exceptions.RepositoryError('No root of trust!'
' Could not find the "root.json" file.')
def __str__(self):
"""
The string representation of an Updater object.
"""
return self.repository_name
def _load_metadata_from_file(self, metadata_set, metadata_role):
"""
<Purpose>
Non-public method that loads current or previous metadata if there is a
local file. If the expected file belonging to 'metadata_role' (e.g.,
'root.json') cannot be loaded, raise an exception. The extracted metadata
object loaded from file is saved to the metadata store (i.e.,
self.metadata).
<Arguments>
metadata_set:
The string 'current' or 'previous', depending on whether one wants to
load the currently or previously trusted metadata file.
metadata_role:
The name of the metadata. This is a role name and should
not end in '.json'. Examples: 'root', 'targets', 'unclaimed'.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the role object loaded for 'metadata_role' is improperly formatted.
securesystemslib.exceptions.Error:
If there was an error importing a delegated role of 'metadata_role'
or the 'metadata_set' is not one currently supported.
<Side Effects>
If the metadata is loaded successfully, it is saved to the metadata
store. If 'metadata_role' is 'root', the role and key databases
are reloaded. If 'metadata_role' is a target metadata, all its
delegated roles are refreshed.
<Returns>
None.
"""
# Ensure we have a valid metadata set.
if metadata_set not in ['current', 'previous']:
raise securesystemslib.exceptions.Error(
'Invalid metadata set: ' + repr(metadata_set))
# Save and construct the full metadata path.
metadata_directory = self.metadata_directory[metadata_set]
metadata_filename = metadata_role + '.json'
metadata_filepath = os.path.join(metadata_directory, metadata_filename)
# Ensure the metadata path is valid/exists, else ignore the call.
if os.path.exists(metadata_filepath):
# Load the file. The loaded object should conform to
# 'tuf.formats.SIGNABLE_SCHEMA'.
try:
metadata_signable = securesystemslib.util.load_json_file(
metadata_filepath)
# Although the metadata file may exist locally, it may not
# be a valid json file. On the next refresh cycle, it will be
        # updated as required. If Root cannot be loaded from disk
# successfully, an exception should be raised by the caller.
except securesystemslib.exceptions.Error:
return
tuf.formats.check_signable_object_format(metadata_signable)
# Extract the 'signed' role object from 'metadata_signable'.
metadata_object = metadata_signable['signed']
# Save the metadata object to the metadata store.
self.metadata[metadata_set][metadata_role] = metadata_object
# If 'metadata_role' is 'root' or targets metadata, the key and role
      # databases must be rebuilt. If 'root', ensure self.consistent_snapshot
# is updated.
if metadata_set == 'current':
if metadata_role == 'root':
self._rebuild_key_and_role_db()
self.consistent_snapshot = metadata_object['consistent_snapshot']
elif metadata_object['_type'] == 'targets':
# TODO: Should we also remove the keys of the delegated roles?
self._import_delegations(metadata_role)
def _rebuild_key_and_role_db(self):
"""
<Purpose>
Non-public method that rebuilds the key and role databases from the
currently trusted 'root' metadata object extracted from 'root.json'.
This private method is called when a new/updated 'root' metadata file is
loaded or when updater.refresh() is called. This method will only store
the role information of the top-level roles (i.e., 'root', 'targets',
'snapshot', 'timestamp').
<Arguments>
None.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the 'root' metadata is improperly formatted.
securesystemslib.exceptions.Error:
If there is an error loading a role contained in the 'root'
metadata.
<Side Effects>
The key and role databases are reloaded for the top-level roles.
<Returns>
None.
"""
# Clobbering this means all delegated metadata files are rendered outdated
# and will need to be reloaded. However, reloading the delegated metadata
# files is avoided here because fetching target information with
# get_one_valid_targetinfo() always causes a refresh of these files. The
# metadata files for delegated roles are also not loaded when the
# repository is first instantiated. Due to this setup, reloading delegated
# roles is not required here.
tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'],
self.repository_name)
tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'],
self.repository_name)
def _import_delegations(self, parent_role):
"""
<Purpose>
Non-public method that imports all the roles delegated by 'parent_role'.
<Arguments>
parent_role:
The role whose delegations will be imported.
<Exceptions>
securesystemslib.exceptions.FormatError:
If a key attribute of a delegated role's signing key is
improperly formatted.
securesystemslib.exceptions.Error:
        If the signing key of a delegated role cannot be loaded.
<Side Effects>
The key and role databases are modified to include the newly loaded roles
delegated by 'parent_role'.
<Returns>
None.
"""
current_parent_metadata = self.metadata['current'][parent_role]
if 'delegations' not in current_parent_metadata:
return
# This could be quite slow with a large number of delegations.
keys_info = current_parent_metadata['delegations'].get('keys', {})
roles_info = current_parent_metadata['delegations'].get('roles', [])
logger.debug('Adding roles delegated from ' + repr(parent_role) + '.')
# Iterate the keys of the delegated roles of 'parent_role' and load them.
for keyid, keyinfo in six.iteritems(keys_info):
if keyinfo['keytype'] in ['rsa', 'ed25519', 'ecdsa-sha2-nistp256']:
# We specify the keyid to ensure that it's the correct keyid
# for the key.
try:
# The repo may have used hashing algorithms for the generated keyids
          # that don't match the client's set of hash algorithms. Make sure
          # to use only the repo's selected hashing algorithms.
hash_algorithms = securesystemslib.settings.HASH_ALGORITHMS
securesystemslib.settings.HASH_ALGORITHMS = keyinfo['keyid_hash_algorithms']
key, keyids = securesystemslib.keys.format_metadata_to_key(keyinfo)
securesystemslib.settings.HASH_ALGORITHMS = hash_algorithms
for key_id in keyids:
key['keyid'] = key_id
tuf.keydb.add_key(key, keyid=None, repository_name=self.repository_name)
except tuf.exceptions.KeyAlreadyExistsError:
pass
except (securesystemslib.exceptions.FormatError, securesystemslib.exceptions.Error):
logger.exception('Invalid key for keyid: ' + repr(keyid) + '.')
logger.error('Aborting role delegation for parent role ' + parent_role + '.')
raise
else:
logger.warning('Invalid key type for ' + repr(keyid) + '.')
continue
# Add the roles to the role database.
for roleinfo in roles_info:
try:
# NOTE: tuf.roledb.add_role will take care of the case where rolename
# is None.
rolename = roleinfo.get('name')
logger.debug('Adding delegated role: ' + str(rolename) + '.')
tuf.roledb.add_role(rolename, roleinfo, self.repository_name)
except tuf.exceptions.RoleAlreadyExistsError:
logger.warning('Role already exists: ' + rolename)
except Exception:
logger.exception('Failed to add delegated role: ' + repr(rolename) + '.')
raise
def refresh(self, unsafely_update_root_if_necessary=True):
"""
<Purpose>
Update the latest copies of the metadata for the top-level roles. The
update request process follows a specific order to ensure the metadata
files are securely updated:
      root (if necessary) -> timestamp -> snapshot -> targets.
Delegated metadata is not refreshed by this method. After this method is
called, the use of get_one_valid_targetinfo() will update delegated
metadata, when required. Calling refresh() ensures that top-level
metadata is up-to-date, so that the target methods can refer to the
latest available content. Thus, refresh() should always be called by the
client before any requests of target file information.
The expiration time for downloaded metadata is also verified, including
local metadata that the repository claims is up to date.
      If the locally loaded Root metadata has expired and
      'unsafely_update_root_if_necessary' is set, refresh first attempts to
      update the Root metadata file before continuing; if it is not set, the
      expiration error is raised immediately. Only after this check will the
      exceptions listed here potentially be raised.
<Arguments>
unsafely_update_root_if_necessary:
Boolean that indicates whether to unsafely update the Root metadata if
any of the top-level metadata cannot be downloaded successfully. The
Root role is unsafely updated if its current version number is unknown.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
If the metadata for any of the top-level roles cannot be updated.
tuf.exceptions.ExpiredMetadataError:
        If any of the top-level metadata is expired (whether the newly
        downloaded version is expired, or no new version was found and the
        existing version is now expired).
<Side Effects>
Updates the metadata files of the top-level roles with the latest
information.
<Returns>
None.
"""
# Do the arguments have the correct format?
# This check ensures the arguments have the appropriate
# number of objects and object types, and that all dict
# keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.BOOLEAN_SCHEMA.check_match(
unsafely_update_root_if_necessary)
# Update the top-level metadata. The _update_metadata_if_changed() and
# _update_metadata() calls below do NOT perform an update if there
# is insufficient trusted signatures for the specified metadata.
# Raise 'tuf.exceptions.NoWorkingMirrorError' if an update fails.
root_metadata = self.metadata['current']['root']
try:
self._ensure_not_expired(root_metadata, 'root')
except tuf.exceptions.ExpiredMetadataError:
# Raise 'tuf.exceptions.NoWorkingMirrorError' if a valid (not
# expired, properly signed, and valid metadata) 'root.json' cannot be
# installed.
if unsafely_update_root_if_necessary:
logger.info('Expired Root metadata was loaded from disk.'
' Try to update it now.' )
# The caller explicitly requested not to unsafely fetch an expired Root.
else:
logger.info('An expired Root metadata was loaded and must be updated.')
raise
# TODO: How should the latest root metadata be verified? According to the
# currently trusted root keys? What if all of the currently trusted
# root keys have since been revoked by the latest metadata? Alternatively,
# do we blindly trust the downloaded root metadata here?
self._update_root_metadata(root_metadata)
# Ensure that the role and key information of the top-level roles is the
# latest. We do this whether or not Root needed to be updated, in order to
# ensure that, e.g., the entries in roledb for top-level roles are
# populated with expected keyid info so that roles can be validated. In
# certain circumstances, top-level metadata might be missing because it was
# marked obsolete and deleted after a failed attempt, and thus we should
# refresh them here as a protective measure. See Issue #736.
self._rebuild_key_and_role_db()
self.consistent_snapshot = \
self.metadata['current']['root']['consistent_snapshot']
# Use default but sane information for timestamp metadata, and do not
# require strict checks on its required length.
self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH)
# TODO: After fetching snapshot.json, we should either verify the root
# fileinfo referenced there matches what was fetched earlier in
# _update_root_metadata() or make another attempt to download root.json.
self._update_metadata_if_changed('snapshot',
referenced_metadata='timestamp')
self._update_metadata_if_changed('targets')
def _update_root_metadata(self, current_root_metadata):
"""
<Purpose>
The root file must be signed by the current root threshold and keys as
well as the previous root threshold and keys. The update process for root
files means that each intermediate root file must be downloaded, to build
a chain of trusted root keys from keys already trusted by the client:
1.root -> 2.root -> 3.root
3.root must be signed by the threshold and keys of 2.root, and 2.root
must be signed by the threshold and keys of 1.root.
<Arguments>
current_root_metadata:
The currently held version of root.
<Side Effects>
Updates the root metadata files with the latest information.
<Returns>
None.
"""
def neither_403_nor_404(mirror_error):
if isinstance(mirror_error, requests.exceptions.HTTPError):
if mirror_error.response.status_code in {403, 404}:
return False
return True
# Temporarily set consistent snapshot. Will be updated to whatever is set
# in the latest root.json after running through the intermediates with
# _update_metadata().
self.consistent_snapshot = True
# Following the spec, try downloading the N+1th root for a certain maximum
# number of times.
lower_bound = current_root_metadata['version'] + 1
upper_bound = lower_bound + tuf.settings.MAX_NUMBER_ROOT_ROTATIONS
# Try downloading the next root.
for next_version in range(lower_bound, upper_bound):
try:
# Thoroughly verify it.
self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH,
version=next_version)
# When we run into HTTP 403/404 error from ALL mirrors, break out of
# loop, because the next root metadata file is most likely missing.
except tuf.exceptions.NoWorkingMirrorError as exception:
for mirror_error in exception.mirror_errors.values():
# Otherwise, reraise the error, because it is not a simple HTTP
# error.
if neither_403_nor_404(mirror_error):
            logger.exception('Misc error for root version ' + str(next_version))
raise
else:
# Calling this function should give us a detailed stack trace
# including an HTTP error code, if any.
            logger.exception('HTTP error for root version ' + str(next_version))
# If we are here, then we ran into only 403 / 404 errors, which are
# good reasons to suspect that the next root metadata file does not
# exist.
break
# Ensure that the role and key information of the top-level roles is the
# latest. We do this whether or not Root needed to be updated, in order
# to ensure that, e.g., the entries in roledb for top-level roles are
# populated with expected keyid info so that roles can be validated. In
# certain circumstances, top-level metadata might be missing because it
# was marked obsolete and deleted after a failed attempt, and thus we
# should refresh them here as a protective measure. See Issue #736.
self._rebuild_key_and_role_db()
# Set our consistent snapshot property to what the latest root has said.
self.consistent_snapshot = \
self.metadata['current']['root']['consistent_snapshot']
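# Illustrative sketch (kept as a comment so this module stays importable):
# the rotation loop above reduces to walking version numbers until a 403/404
# from every mirror signals that no newer root exists. 'fetch_root' is a
# hypothetical stand-in for _update_metadata('root', ...).
#
#   for version in range(trusted_version + 1,
#       trusted_version + 1 + tuf.settings.MAX_NUMBER_ROOT_ROTATIONS):
#     try:
#       fetch_root(version)  # verified against both old and new root keys
#     except tuf.exceptions.NoWorkingMirrorError:
#       break  # no (N+1).root anywhere: the walk is complete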
def _check_hashes(self, file_object, trusted_hashes):
"""
<Purpose>
Non-public method that verifies multiple secure hashes of the downloaded
file 'file_object'. If any of these checks fail, an exception is raised.
This conforms with the TUF spec, which supports clients with different
hashing algorithms. The 'hash.py' module is used to compute the hashes of
'file_object'.
<Arguments>
file_object:
A 'securesystemslib.util.TempFile' file-like object. 'file_object'
ensures that a read() without a size argument properly reads the entire
file.
trusted_hashes:
A dictionary with hash-algorithm names as keys and hex-digest strings as
values, conformant to 'securesystemslib.formats.HASHDICT_SCHEMA'.
<Exceptions>
securesystemslib.exceptions.BadHashError, if the hashes don't match.
<Side Effects>
Hash digest object is created using the 'securesystemslib.hash' module.
<Returns>
None.
"""
# Verify each trusted hash of 'trusted_hashes'. If all are valid, simply
# return.
for algorithm, trusted_hash in six.iteritems(trusted_hashes):
digest_object = securesystemslib.hash.digest(algorithm)
digest_object.update(file_object.read())
computed_hash = digest_object.hexdigest()
# Raise an exception if any of the hashes are incorrect.
if trusted_hash != computed_hash:
raise securesystemslib.exceptions.BadHashError(trusted_hash,
computed_hash)
else:
logger.info('The file\'s ' + algorithm + ' hash is'
' correct: ' + trusted_hash)
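# A minimal sketch of the same multi-hash check, assuming hypothetical
# 'data' (the downloaded bytes) and 'trusted_hashes' (algorithm -> hex
# digest) values:
#
#   digest_object = securesystemslib.hash.digest('sha256')
#   digest_object.update(data)
#   if digest_object.hexdigest() != trusted_hashes['sha256']:
#     raise securesystemslib.exceptions.BadHashError(
#         trusted_hashes['sha256'], digest_object.hexdigest())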
def _hard_check_file_length(self, file_object, trusted_file_length):
"""
<Purpose>
Non-public method that ensures the length of 'file_object' is strictly
equal to 'trusted_file_length'. This is a deliberately redundant
implementation designed to complement
tuf.download._check_downloaded_length().
<Arguments>
file_object:
A 'securesystemslib.util.TempFile' file-like object. 'file_object'
ensures that a read() without a size argument properly reads the entire
file.
trusted_file_length:
A non-negative integer that is the trusted length of the file.
<Exceptions>
tuf.exceptions.DownloadLengthMismatchError, if the lengths do not match.
<Side Effects>
Reads the contents of 'file_object' and logs a message if 'file_object'
matches the trusted length.
<Returns>
None.
"""
# Read the entire contents of 'file_object', a
# 'securesystemslib.util.TempFile' file-like object that ensures the entire
# file is read.
observed_length = len(file_object.read())
# Return and log a message if the length of 'file_object' is equal to
# 'trusted_file_length', otherwise raise an exception. A hard check
# ensures that a downloaded file strictly matches a known, or trusted,
# file length.
if observed_length != trusted_file_length:
raise tuf.exceptions.DownloadLengthMismatchError(trusted_file_length,
observed_length)
else:
logger.debug('Observed length (' + str(observed_length) +\
') == trusted length (' + str(trusted_file_length) + ')')
def _soft_check_file_length(self, file_object, trusted_file_length):
"""
<Purpose>
Non-public method that checks the trusted file length of a
'securesystemslib.util.TempFile' file-like object. The length of the file
must be less than or equal to the expected length. This is a deliberately
redundant implementation designed to complement
tuf.download._check_downloaded_length().
<Arguments>
file_object:
A 'securesystemslib.util.TempFile' file-like object. 'file_object'
ensures that a read() without a size argument properly reads the entire
file.
trusted_file_length:
A non-negative integer that is the trusted length of the file.
<Exceptions>
tuf.exceptions.DownloadLengthMismatchError, if the lengths do
not match.
<Side Effects>
Reads the contents of 'file_object' and logs a message if 'file_object'
is less than or equal to the trusted length.
<Returns>
None.
"""
# Read the entire contents of 'file_object', a
# 'securesystemslib.util.TempFile' file-like object that ensures the entire
# file is read.
observed_length = len(file_object.read())
# Return and log a message if 'file_object' is less than or equal to
# 'trusted_file_length', otherwise raise an exception. A soft check
# ensures that an upper bound restricts how large a file is downloaded.
if observed_length > trusted_file_length:
raise tuf.exceptions.DownloadLengthMismatchError(trusted_file_length,
observed_length)
else:
logger.debug('Observed length (' + str(observed_length) +\
') <= trusted length (' + str(trusted_file_length) + ')')
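# The two length checks above differ only in their comparison operator: the
# hard check demands strict equality (used for targets, whose exact lengths
# are signed), while the soft check only enforces an upper bound (used when
# the true length is unknown, e.g., timestamp). A sketch with hypothetical
# 'observed' and 'trusted' integers:
#
#   if observed != trusted:  # hard check
#     raise tuf.exceptions.DownloadLengthMismatchError(trusted, observed)
#   if observed > trusted:   # soft check: an upper bound only
#     raise tuf.exceptions.DownloadLengthMismatchError(trusted, observed)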
def _get_target_file(self, target_filepath, file_length, file_hashes):
"""
<Purpose>
Non-public method that safely (i.e., the file length and hash are
strictly equal to the trusted) downloads a target file up to a certain
length, and checks its hashes thereafter.
<Arguments>
target_filepath:
The target filepath (relative to the repository targets directory)
obtained from TUF targets metadata.
file_length:
The expected compressed length of the target file. If the file is not
compressed, then it will simply be its uncompressed length.
file_hashes:
The expected hashes of the target file.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The target could not be fetched. This is raised only when all known
mirrors failed to provide a valid copy of the desired target file.
<Side Effects>
The target file is downloaded from all known repository mirrors in the
worst case. If a valid copy of the target file is found, it is stored in
a temporary file and returned.
<Returns>
A 'securesystemslib.util.TempFile' file-like object containing the target.
"""
# Define a callable function that is passed as an argument to _get_file()
# and called. The 'verify_target_file' function ensures the file length
# and hashes of 'target_filepath' are strictly equal to the trusted values.
def verify_target_file(target_file_object):
# Every target file must have its length and hashes inspected.
self._hard_check_file_length(target_file_object, file_length)
self._check_hashes(target_file_object, file_hashes)
if self.consistent_snapshot:
# Note: values() does not return a list in Python 3. Use list()
# on values() for Python 2+3 compatibility.
target_digest = list(file_hashes.values()).pop()
dirname, basename = os.path.split(target_filepath)
target_filepath = os.path.join(dirname, target_digest + '.' + basename)
return self._get_file(target_filepath, verify_target_file,
'target', file_length, download_safely=True)
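# When consistent snapshots are enabled, the requested target path is
# rewritten to a digest-prefixed form before download. A sketch of that
# transformation, with a hypothetical 'file_hashes' value:
#
#   file_hashes = {'sha256': 'dbfac345'}
#   target_digest = list(file_hashes.values()).pop()
#   dirname, basename = os.path.split('a/b/c.txt')
#   os.path.join(dirname, target_digest + '.' + basename)
#   # -> 'a/b/dbfac345.c.txt'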
def _verify_uncompressed_metadata_file(self, metadata_file_object,
metadata_role):
"""
<Purpose>
Non-public method that verifies an uncompressed metadata file. An
exception is raised if 'metadata_file_object' is invalid. There is no
return value.
<Arguments>
metadata_file_object:
A 'securesystemslib.util.TempFile' instance containing the metadata
file. 'metadata_file_object' ensures the entire file is returned with
read().
metadata_role:
The role name of the metadata (e.g., 'root', 'targets',
'unclaimed').
<Exceptions>
securesystemslib.exceptions.FormatError:
In case the metadata file is valid JSON, but not valid TUF metadata.
tuf.exceptions.InvalidMetadataJSONError:
In case the metadata file is not valid JSON.
tuf.exceptions.ReplayedMetadataError:
In case the downloaded metadata file is older than the current one.
tuf.exceptions.RepositoryError:
In case the repository is somehow inconsistent; e.g. a parent has not
delegated to a child (contrary to expectations).
securesystemslib.exceptions.BadSignatureError:
In case the metadata file does not have a valid signature.
<Side Effects>
The content of 'metadata_file_object' is read and loaded.
<Returns>
None.
"""
metadata = metadata_file_object.read().decode('utf-8')
try:
metadata_signable = securesystemslib.util.load_json_string(metadata)
except Exception as exception:
raise tuf.exceptions.InvalidMetadataJSONError(exception)
else:
# Ensure the loaded 'metadata_signable' is properly formatted. Raise
# 'securesystemslib.exceptions.FormatError' if not.
tuf.formats.check_signable_object_format(metadata_signable)
# Is 'metadata_signable' expired?
self._ensure_not_expired(metadata_signable['signed'], metadata_role)
# We previously verified version numbers in this function, but have since
# moved version number verification to the functions that retrieve
# metadata.
# Verify the signature on the downloaded metadata object.
valid = tuf.sig.verify(metadata_signable, metadata_role,
self.repository_name)
if not valid:
raise securesystemslib.exceptions.BadSignatureError(metadata_role)
def _get_metadata_file(self, metadata_role, remote_filename,
upperbound_filelength, expected_version):
"""
<Purpose>
Non-public method that tries downloading, up to a certain length, a
metadata file from a list of known mirrors. As soon as the first valid
copy of the file is found, the downloaded file is returned and the
remaining mirrors are skipped.
<Arguments>
metadata_role:
The role name of the metadata (e.g., 'root', 'targets', 'unclaimed').
remote_filename:
The relative file path (on the remote repository) of 'metadata_role'.
upperbound_filelength:
The expected length, or upper bound, of the metadata file to be
downloaded.
expected_version:
The expected and required version number of the 'metadata_role' file
downloaded. 'expected_version' is an integer.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The metadata could not be fetched. This is raised only when all known
mirrors failed to provide a valid copy of the desired metadata file.
<Side Effects>
The file is downloaded from all known repository mirrors in the worst
case. If a valid copy of the file is found, it is stored in a temporary
file and returned.
<Returns>
A 'securesystemslib.util.TempFile' file-like object containing the
metadata.
"""
file_mirrors = tuf.mirrors.get_list_of_mirrors('meta', remote_filename,
self.mirrors)
# file_mirror (URL): error (Exception)
file_mirror_errors = {}
file_object = None
for file_mirror in file_mirrors:
try:
file_object = tuf.download.unsafe_download(file_mirror,
upperbound_filelength)
# Verify 'file_object' according to the callable function.
# 'file_object' is also verified if decompressed above (i.e., the
# uncompressed version).
metadata_signable = \
securesystemslib.util.load_json_string(file_object.read().decode('utf-8'))
# Determine if the specification version number is supported. It is
# assumed that "spec_version" is in major.minor.fix format (for example,
# "1.4.3") and that releases with the same major version number maintain
# backwards compatibility. Consequently, if the major version number of
# the new metadata equals our expected major version number, the new
# metadata is safe to parse.
try:
metadata_spec_version = metadata_signable['signed']['spec_version']
metadata_spec_version_split = metadata_spec_version.split('.')
metadata_spec_major_version = int(metadata_spec_version_split[0])
metadata_spec_minor_version = int(metadata_spec_version_split[1])
code_spec_version_split = tuf.SPECIFICATION_VERSION.split('.')
code_spec_major_version = int(code_spec_version_split[0])
code_spec_minor_version = int(code_spec_version_split[1])
if metadata_spec_major_version != code_spec_major_version:
raise tuf.exceptions.UnsupportedSpecificationError(
'Downloaded metadata that specifies an unsupported '
'spec_version. This code supports major version number: ' +
repr(code_spec_major_version) + '; however, the obtained '
'metadata lists version number: ' + str(metadata_spec_version))
# Report to the user if the minor versions do not match; continue with
# the update.
if metadata_spec_minor_version != code_spec_minor_version:
logger.info("Downloaded metadata that specifies a different minor " +
"spec_version. This code has version " +
str(tuf.SPECIFICATION_VERSION) +
" and the metadata lists version number " +
str(metadata_spec_version) +
". The update will continue as the major versions match.")
except (ValueError, TypeError):
raise securesystemslib.exceptions.FormatError('Improperly'
' formatted spec_version, which must be in major.minor.fix format')
# If the version number is unspecified, ensure that the version number
# downloaded is greater than the currently trusted version number for
# 'metadata_role'.
version_downloaded = metadata_signable['signed']['version']
if expected_version is not None:
# Verify that the downloaded version matches the version expected by
# the caller.
if version_downloaded != expected_version:
raise tuf.exceptions.BadVersionNumberError('Downloaded'
' version number: ' + repr(version_downloaded) + '. Version'
' number MUST be: ' + repr(expected_version))
# The caller does not know which version to download. Verify that the
# downloaded version is at least greater than the one locally
# available.
else:
# Verify that the version number of the locally stored
# 'timestamp.json', if available, is less than what was downloaded.
# Otherwise, accept the new timestamp with version number
# 'version_downloaded'.
try:
current_version = \
self.metadata['current'][metadata_role]['version']
if version_downloaded < current_version:
raise tuf.exceptions.ReplayedMetadataError(metadata_role,
version_downloaded, current_version)
except KeyError:
logger.info(metadata_role + ' not available locally.')
self._verify_uncompressed_metadata_file(file_object, metadata_role)
except Exception as exception:
# Remember the error from this mirror, and "reset" the target file.
logger.exception('Update failed from ' + file_mirror + '.')
file_mirror_errors[file_mirror] = exception
file_object = None
else:
break
if file_object:
return file_object
else:
logger.error('Failed to update ' + repr(remote_filename) + ' from all'
' mirrors: ' + repr(file_mirror_errors))
raise tuf.exceptions.NoWorkingMirrorError(file_mirror_errors)
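# The spec_version gate above boils down to: reject a different major
# version, warn on a different minor version. A sketch, with a hypothetical
# downloaded version string in major.minor.fix form:
#
#   metadata_major, metadata_minor = '1.4.3'.split('.')[:2]
#   code_major, code_minor = tuf.SPECIFICATION_VERSION.split('.')[:2]
#   if int(metadata_major) != int(code_major):
#     raise tuf.exceptions.UnsupportedSpecificationError('major mismatch')
#   elif int(metadata_minor) != int(code_minor):
#     logger.info('Minor spec_version mismatch; continuing with the update.')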
def _verify_root_chain_link(self, rolename, current_root_metadata,
next_root_metadata):
if rolename != 'root':
return True
current_root_role = current_root_metadata['roles'][rolename]
# Verify next metadata with current keys/threshold
valid = tuf.sig.verify(next_root_metadata, rolename, self.repository_name,
current_root_role['threshold'], current_root_role['keyids'])
if not valid:
raise securesystemslib.exceptions.BadSignatureError('Root is not signed'
' by previous threshold of keys.')
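# A sketch of the chain-link rule enforced above: version N+1 of root must
# meet the threshold and keyids recorded by the trusted version N. The
# 'current_root' and 'next_root_signable' names are hypothetical:
#
#   role = current_root['roles']['root']
#   if not tuf.sig.verify(next_root_signable, 'root', self.repository_name,
#       role['threshold'], role['keyids']):
#     raise securesystemslib.exceptions.BadSignatureError('root')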
def _get_file(self, filepath, verify_file_function, file_type, file_length,
download_safely=True):
"""
<Purpose>
Non-public method that tries downloading, up to a certain length, a
metadata or target file from a list of known mirrors. As soon as the first
valid copy of the file is found, the rest of the mirrors will be skipped.
<Arguments>
filepath:
The relative metadata or target filepath.
verify_file_function:
A callable function that expects a 'securesystemslib.util.TempFile'
file-like object and raises an exception if the file is invalid.
Target files and uncompressed versions of metadata may be verified with
'verify_file_function'.
file_type:
Type of data needed for download, must correspond to one of the strings
in the list ['meta', 'target']. 'meta' for metadata file type or
'target' for target file type. It should correspond to the
'securesystemslib.formats.NAME_SCHEMA' format.
file_length:
The expected length, or upper bound, of the target or metadata file to
be downloaded.
download_safely:
A boolean switch to toggle safe or unsafe download of the file.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The metadata could not be fetched. This is raised only when all known
mirrors failed to provide a valid copy of the desired metadata file.
<Side Effects>
The file is downloaded from all known repository mirrors in the worst
case. If a valid copy of the file is found, it is stored in a temporary
file and returned.
<Returns>
A 'securesystemslib.util.TempFile' file-like object containing the
metadata or target.
"""
file_mirrors = tuf.mirrors.get_list_of_mirrors(file_type, filepath,
self.mirrors)
# file_mirror (URL): error (Exception)
file_mirror_errors = {}
file_object = None
for file_mirror in file_mirrors:
try:
# TODO: Instead of the more fragile 'download_safely' switch, unroll
# the function into two separate ones: one for "safe" download, and the
# other one for "unsafe" download? This should induce safer and more
# readable code.
if download_safely:
file_object = tuf.download.safe_download(file_mirror, file_length)
else:
file_object = tuf.download.unsafe_download(file_mirror, file_length)
# Verify 'file_object' according to the callable function.
# 'file_object' is also verified if decompressed above (i.e., the
# uncompressed version).
verify_file_function(file_object)
except Exception as exception:
# Remember the error from this mirror, and "reset" the target file.
logger.exception('Update failed from ' + file_mirror + '.')
file_mirror_errors[file_mirror] = exception
file_object = None
else:
break
if file_object:
return file_object
else:
logger.error('Failed to update ' + repr(filepath) + ' from'
' all mirrors: ' + repr(file_mirror_errors))
raise tuf.exceptions.NoWorkingMirrorError(file_mirror_errors)
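# The mirror loop above implements "first valid copy wins": each failure is
# recorded per mirror, and only if every mirror fails is the aggregate error
# raised. A sketch with hypothetical 'mirrors' and 'fetch' names:
#
#   errors = {}
#   for mirror in mirrors:
#     try:
#       return fetch(mirror)
#     except Exception as error:
#       errors[mirror] = error
#   raise tuf.exceptions.NoWorkingMirrorError(errors)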
def _update_metadata(self, metadata_role, upperbound_filelength, version=None):
"""
<Purpose>
Non-public method that downloads, verifies, and 'installs' the metadata
belonging to 'metadata_role'. Calling this method implies that the
'metadata_role' on the repository is newer than the client's, and thus
needs to be re-downloaded. The current and previous metadata stores are
updated if the newly downloaded metadata is successfully downloaded and
verified. This method also assumes that the store of top-level metadata
is the latest and exists.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
upperbound_filelength:
The expected length, or upper bound, of the metadata file to be
downloaded.
version:
The expected and required version number of the 'metadata_role' file
downloaded. 'version' is an integer.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The metadata cannot be updated. This is not specific to a single
failure but rather indicates that all possible ways to update the
metadata have been tried and failed.
<Side Effects>
The metadata file belonging to 'metadata_role' is downloaded from a
repository mirror. If the metadata is valid, it is stored in the
metadata store.
<Returns>
None.
"""
# Construct the metadata filename as expected by the download/mirror
# modules.
metadata_filename = metadata_role + '.json'
# Attempt a file download from each mirror until the file is downloaded and
# verified. If the signature of the downloaded file is valid, proceed,
# otherwise log a warning and try the next mirror. 'metadata_file_object'
# is the file-like object returned by 'download.py'. 'metadata_signable'
# is the object extracted from 'metadata_file_object'. Metadata saved to
# files are regarded as 'signable' objects, conformant to
# 'tuf.formats.SIGNABLE_SCHEMA'.
#
# Some metadata (presently timestamp) will be downloaded "unsafely", in the
# sense that we can only estimate its true length and know nothing about
# its version. This is because not all metadata will have other metadata
# for it; otherwise we will have an infinite regress of metadata signing
# for each other. In this case, we will download the metadata up to the
# best length we can get for it, not request a specific version, but
# perform the rest of the checks (e.g., signature verification).
remote_filename = metadata_filename
filename_version = ''
if self.consistent_snapshot and version:
filename_version = version
dirname, basename = os.path.split(remote_filename)
remote_filename = os.path.join(
dirname, str(filename_version) + '.' + basename)
metadata_file_object = \
self._get_metadata_file(metadata_role, remote_filename,
upperbound_filelength, version)
# The metadata has been verified. Move the metadata file into place.
# First, move the 'current' metadata file to the 'previous' directory
# if it exists.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
current_filepath = os.path.abspath(current_filepath)
securesystemslib.util.ensure_parent_dir(current_filepath)
previous_filepath = os.path.join(self.metadata_directory['previous'],
metadata_filename)
previous_filepath = os.path.abspath(previous_filepath)
if os.path.exists(current_filepath):
# Previous metadata might not exist, say when delegations are added.
securesystemslib.util.ensure_parent_dir(previous_filepath)
shutil.move(current_filepath, previous_filepath)
# Next, move the verified updated metadata file to the 'current' directory.
# Note that the 'move' method comes from securesystemslib.util's TempFile class.
# 'metadata_file_object' is an instance of securesystemslib.util.TempFile.
metadata_signable = \
securesystemslib.util.load_json_string(metadata_file_object.read().decode('utf-8'))
metadata_file_object.move(current_filepath)
# Extract the metadata object so we can store it to the metadata store.
# 'current_metadata_object' set to 'None' if there is not an object
# stored for 'metadata_role'.
updated_metadata_object = metadata_signable['signed']
current_metadata_object = self.metadata['current'].get(metadata_role)
self._verify_root_chain_link(metadata_role, current_metadata_object,
metadata_signable)
# Finally, update the metadata and fileinfo stores, and rebuild the
# key and role info for the top-level roles if 'metadata_role' is root.
# Rebuilding the key and role info is required if the newly-installed
# root metadata has revoked keys or updated any top-level role information.
logger.debug('Updated ' + repr(current_filepath) + '.')
self.metadata['previous'][metadata_role] = current_metadata_object
self.metadata['current'][metadata_role] = updated_metadata_object
self._update_versioninfo(metadata_filename)
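# The installation step above is a simple two-slot rotation, sketched here
# with hypothetical paths:
#
#   # 1. current/root.json -> previous/root.json (keep one fallback copy).
#   shutil.move('metadata/current/root.json', 'metadata/previous/root.json')
#   # 2. verified download -> current/root.json (install the new metadata).
#   metadata_file_object.move('metadata/current/root.json')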
def _update_metadata_if_changed(self, metadata_role,
referenced_metadata='snapshot'):
"""
<Purpose>
Non-public method that updates the metadata for 'metadata_role' if it has
changed. With the exception of the 'timestamp' role, all the top-level
roles are updated by this method. The 'timestamp' role is always
downloaded from a mirror without first checking if it has been updated;
it is updated in refresh() by calling _update_metadata('timestamp').
This method is also called for delegated role metadata, which are
referenced by 'snapshot'.
If the metadata needs to be updated but an update cannot be obtained,
this method will delete the file (with the exception of the root
metadata, which never gets removed without a replacement).
Due to the way in which metadata files are updated, it is expected that
'referenced_metadata' is not out of date and trusted. The refresh()
method updates the top-level roles in 'root -> timestamp -> snapshot ->
targets' order. For delegated metadata, the parent role is
updated before the delegated role. Taking into account that
'referenced_metadata' is updated and verified before 'metadata_role',
this method determines if 'metadata_role' has changed by checking
the 'meta' field of the newly updated 'referenced_metadata'.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'unclaimed'.
referenced_metadata:
This is the metadata that provides the role information for
'metadata_role'. For the top-level roles, the 'snapshot' role
is the referenced metadata for the 'root' and 'targets' roles.
The 'timestamp' metadata is always downloaded regardless. In
other words, it is updated by calling _update_metadata('timestamp')
and not by this method. The referenced metadata for 'snapshot'
is 'timestamp'. See refresh().
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
If 'metadata_role' could not be downloaded after determining that it
had changed.
tuf.exceptions.RepositoryError:
If the referenced metadata is missing.
<Side Effects>
If it is determined that 'metadata_role' has been updated, the metadata
store (i.e., self.metadata) is updated with the new metadata and the
affected stores modified (i.e., the previous metadata store is updated).
If the metadata is 'targets' or a delegated targets role, the role
database is updated with the new information, including its delegated
roles.
<Returns>
None.
"""
metadata_filename = metadata_role + '.json'
expected_versioninfo = None
# Ensure the referenced metadata has been loaded. The 'root' role may be
# updated without having 'snapshot' available.
if referenced_metadata not in self.metadata['current']:
raise tuf.exceptions.RepositoryError('Cannot update'
' ' + repr(metadata_role) + ' because ' + referenced_metadata + ' is'
' missing.')
# The referenced metadata has been loaded. Extract the new versioninfo for
# 'metadata_role' from it.
else:
logger.debug(repr(metadata_role) + ' referenced in ' +
repr(referenced_metadata)+ '. ' + repr(metadata_role) +
' may be updated.')
# Simply return if the metadata for 'metadata_role' has not been updated,
# according to the uncompressed metadata provided by the referenced
# metadata. The metadata is considered updated if its version number is
# strictly greater than its currently trusted version number.
expected_versioninfo = self.metadata['current'][referenced_metadata] \
['meta'][metadata_filename]
if not self._versioninfo_has_been_updated(metadata_filename,
expected_versioninfo):
logger.info(repr(metadata_filename) + ' up-to-date.')
# Since we have not downloaded a new version of this metadata, we should
# check to see if our local version is stale and notify the user if so.
# This raises tuf.exceptions.ExpiredMetadataError if the metadata we have
# is expired. Resolves issue #322.
self._ensure_not_expired(self.metadata['current'][metadata_role],
metadata_role)
# TODO: If 'metadata_role' is root or snapshot, we should verify that
# root's hash matches what's in snapshot, and that snapshot hash matches
# what's listed in timestamp.json.
return
logger.debug('Metadata ' + repr(metadata_filename) + ' has changed.')
# The file lengths of metadata are unknown, only their version numbers are
# known. Set an upper limit for the length of the downloaded file for each
# expected role. Note: The Timestamp role is not updated via this
# function.
if metadata_role == 'snapshot':
upperbound_filelength = tuf.settings.DEFAULT_SNAPSHOT_REQUIRED_LENGTH
elif metadata_role == 'root':
upperbound_filelength = DEFAULT_ROOT_UPPERLENGTH
# The metadata is considered Targets (or delegated Targets metadata).
else:
upperbound_filelength = tuf.settings.DEFAULT_TARGETS_REQUIRED_LENGTH
try:
self._update_metadata(metadata_role, upperbound_filelength,
expected_versioninfo['version'])
except Exception:
# The current metadata we have is not current but we couldn't get new
# metadata. We shouldn't use the old metadata anymore. This will get rid
# of in-memory knowledge of the role and delegated roles, but will leave
# delegated metadata files as current files on disk.
#
# TODO: Should we get rid of the delegated metadata files? We shouldn't
# need to, but we need to check the trust implications of the current
# implementation.
self._delete_metadata(metadata_role)
logger.error('Metadata for ' + repr(metadata_role) + ' cannot'
' be updated.')
raise
else:
# We need to import the delegated roles of 'metadata_role', since its
# list of delegations might have changed from what was previously
# loaded.
# TODO: Should we remove the keys of the delegated roles?
self._import_delegations(metadata_role)
def _versioninfo_has_been_updated(self, metadata_filename, new_versioninfo):
"""
<Purpose>
Non-public method that determines whether the current versioninfo of
'metadata_filename' is less than 'new_versioninfo' (i.e., the version
number has been incremented). The 'new_versioninfo' argument should be
extracted from the latest copy of the metadata that references
'metadata_filename'. Example: 'root.json' would be referenced by
'snapshot.json'.
'new_versioninfo' should only be 'None' if this is for updating
'root.json' without having 'snapshot.json' available.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
new_versioninfo:
A dict object representing the new file information for
'metadata_filename'. 'new_versioninfo' may be 'None' when
updating 'root' without having 'snapshot' available. This
dict conforms to 'tuf.formats.VERSIONINFO_SCHEMA' and has
the form:
{'version': 288}
<Exceptions>
None.
<Side Effects>
If there is no versioninfo currently loaded for 'metadata_filename', try
to load it.
<Returns>
Boolean. True if the versioninfo has changed, False otherwise.
"""
# If there is no versioninfo currently stored for 'metadata_filename',
# try to load the file, calculate the versioninfo, and store it.
if metadata_filename not in self.versioninfo:
self._update_versioninfo(metadata_filename)
# Return true if there is no versioninfo for 'metadata_filename'.
# 'metadata_filename' is not in the 'self.versioninfo' store
# and it doesn't exist in the 'current' metadata location.
if self.versioninfo[metadata_filename] is None:
return True
current_versioninfo = self.versioninfo[metadata_filename]
logger.debug('New version for ' + repr(metadata_filename) +
': ' + repr(new_versioninfo['version']) + '. Old version: ' +
repr(current_versioninfo['version']))
if new_versioninfo['version'] > current_versioninfo['version']:
return True
else:
return False
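# The comparison above is deliberately strict greater-than: re-offering an
# equal version counts as "not updated". A sketch with hypothetical
# versioninfo dicts conforming to {'version': N}:
#
#   current_versioninfo = {'version': 287}
#   new_versioninfo = {'version': 288}
#   new_versioninfo['version'] > current_versioninfo['version']  # True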
def _update_versioninfo(self, metadata_filename):
"""
<Purpose>
Non-public method that updates the 'self.versioninfo' entry for the
metadata belonging to 'metadata_filename'. If the current metadata for
'metadata_filename' cannot be loaded, set its 'versioninfo' to 'None' to
signal that it is not in 'self.versioninfo' AND it also doesn't exist
locally.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
<Exceptions>
None.
<Side Effects>
The version number of 'metadata_filename' is calculated and stored in its
corresponding entry in 'self.versioninfo'.
<Returns>
None.
"""
# In case we delayed loading the metadata and didn't do it in
# __init__ (such as with delegated metadata), then get the version
# info now.
# Save the path to the current metadata file for 'metadata_filename'.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
# If the path is invalid, simply return and leave versioninfo unset.
if not os.path.exists(current_filepath):
self.versioninfo[metadata_filename] = None
return
# Extract the version information from the trusted snapshot role and save
# it to the 'self.versioninfo' store.
if metadata_filename == 'timestamp.json':
trusted_versioninfo = \
self.metadata['current']['timestamp']['version']
# When updating snapshot.json, the client either (1) has a copy of
# snapshot.json, or (2) is in the process of obtaining it by first
# downloading timestamp.json. Note: Clients are allowed to have only
# root.json initially, and perform a refresh of top-level metadata to
# obtain the remaining roles.
elif metadata_filename == 'snapshot.json':
# Use the version number of the currently trusted snapshot.json, taken
# from snapshot.json itself; the version number specified in
# timestamp.json may be greater than the version in the client's copy of
# snapshot.json.
try:
snapshot_version_number = self.metadata['current']['snapshot']['version']
trusted_versioninfo = tuf.formats.make_versioninfo(
snapshot_version_number)
except KeyError:
trusted_versioninfo = \
self.metadata['current']['timestamp']['meta']['snapshot.json']
else:
try:
# The metadata file names in 'self.metadata' exclude the role
# extension. Strip the '.json' extension when checking if
# 'metadata_filename' currently exists.
targets_version_number = \
self.metadata['current'][metadata_filename[:-len('.json')]]['version']
trusted_versioninfo = \
tuf.formats.make_versioninfo(targets_version_number)
except KeyError:
trusted_versioninfo = \
self.metadata['current']['snapshot']['meta'][metadata_filename]
self.versioninfo[metadata_filename] = trusted_versioninfo
def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):
"""
<Purpose>
Non-public method that determines whether the current fileinfo of
'metadata_filename' differs from 'new_fileinfo'. The 'new_fileinfo'
argument should be extracted from the latest copy of the metadata that
references 'metadata_filename'. Example: 'root.json' would be referenced
by 'snapshot.json'.
'new_fileinfo' should only be 'None' if this is for updating 'root.json'
without having 'snapshot.json' available.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
new_fileinfo:
A dict object representing the new file information for
'metadata_filename'. 'new_fileinfo' may be 'None' when
updating 'root' without having 'snapshot' available. This
dict conforms to 'tuf.formats.FILEINFO_SCHEMA' and has
the form:
{'length': 23423,
'hashes': {'sha256': adfbc32343..}}
<Exceptions>
None.
<Side Effects>
If there is no fileinfo currently loaded for 'metadata_filename',
try to load it.
<Returns>
Boolean. True if the fileinfo has changed, False otherwise.
"""
# If there is no fileinfo currently stored for 'metadata_filename',
# try to load the file, calculate the fileinfo, and store it.
if metadata_filename not in self.fileinfo:
self._update_fileinfo(metadata_filename)
# Return true if there is no fileinfo for 'metadata_filename'.
# 'metadata_filename' is not in the 'self.fileinfo' store
# and it doesn't exist in the 'current' metadata location.
if self.fileinfo[metadata_filename] is None:
return True
current_fileinfo = self.fileinfo[metadata_filename]
if current_fileinfo['length'] != new_fileinfo['length']:
return True
# Now compare hashes. Note that the reason we can't just do a simple
# equality check on the fileinfo dicts is that we want to support the
# case where the hash algorithms listed in the metadata have changed
# without having that result in considering all files as needing to be
# updated, or not all hash algorithms listed can be calculated on the
# specific client.
for algorithm, hash_value in six.iteritems(new_fileinfo['hashes']):
# We're only looking for a single match. This isn't a security
# check, we just want to prevent unnecessary downloads.
if algorithm in current_fileinfo['hashes']:
if hash_value == current_fileinfo['hashes'][algorithm]:
return False
return True
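# The hash comparison above looks for any single algorithm shared by both
# fileinfo dicts instead of requiring dict equality, so a repository that
# adds or drops an algorithm does not force a re-download. A sketch with
# hypothetical fileinfo hash values:
#
#   current_hashes = {'sha256': 'aa', 'sha512': 'bb'}
#   new_hashes = {'sha512': 'bb', 'blake2b': 'cc'}
#   any(current_hashes.get(algorithm) == hash_value
#       for algorithm, hash_value in new_hashes.items())  # True: unchanged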
def _update_fileinfo(self, metadata_filename):
"""
<Purpose>
Non-public method that updates the 'self.fileinfo' entry for the metadata
belonging to 'metadata_filename'. If the 'current' metadata for
'metadata_filename' cannot be loaded, set its 'fileinfo' to 'None' to
signal that it is not in the 'self.fileinfo' AND it also doesn't exist
locally.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
<Exceptions>
None.
<Side Effects>
The file details of 'metadata_filename' are calculated and
stored in 'self.fileinfo'.
<Returns>
None.
"""
# In case we delayed loading the metadata and didn't do it in
# __init__ (such as with delegated metadata), then get the file
# info now.
# Save the path to the current metadata file for 'metadata_filename'.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
# If the path is invalid, simply return and leave fileinfo unset.
if not os.path.exists(current_filepath):
self.fileinfo[metadata_filename] = None
return
# Extract the file information from the actual file and save it
# to the fileinfo store.
file_length, hashes = securesystemslib.util.get_file_details(
current_filepath)
metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)
self.fileinfo[metadata_filename] = metadata_fileinfo
def _move_current_to_previous(self, metadata_role):
"""
<Purpose>
Non-public method that moves the current metadata file for 'metadata_role'
to the previous directory.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
<Exceptions>
None.
<Side Effects>
The metadata file for 'metadata_role' is removed from 'current'
and moved to the 'previous' directory.
<Returns>
None.
"""
# Get the 'current' and 'previous' full file paths for 'metadata_role'
metadata_filepath = metadata_role + '.json'
previous_filepath = os.path.join(self.metadata_directory['previous'],
metadata_filepath)
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filepath)
# Remove the previous path if it exists.
if os.path.exists(previous_filepath):
os.remove(previous_filepath)
# Move the current path to the previous path.
if os.path.exists(current_filepath):
securesystemslib.util.ensure_parent_dir(previous_filepath)
os.rename(current_filepath, previous_filepath)
def _delete_metadata(self, metadata_role):
"""
<Purpose>
Non-public method that removes all (current) knowledge of 'metadata_role'.
The metadata belonging to 'metadata_role' is removed from the current
'self.metadata' store and from the role database. The 'root.json' role
file is never removed.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
<Exceptions>
None.
<Side Effects>
The role database is modified and the metadata for 'metadata_role'
removed from the 'self.metadata' store.
<Returns>
None.
"""
# The root metadata role is never deleted without a replacement.
if metadata_role == 'root':
return
# Get rid of the current metadata file.
self._move_current_to_previous(metadata_role)
# Remove knowledge of the role.
if metadata_role in self.metadata['current']:
del self.metadata['current'][metadata_role]
tuf.roledb.remove_role(metadata_role, self.repository_name)
def _ensure_not_expired(self, metadata_object, metadata_rolename):
"""
<Purpose>
Non-public method that raises an exception if the current specified
metadata has expired.
<Arguments>
metadata_object:
The metadata to check for expiration, conformant to
'tuf.formats.ANYROLE_SCHEMA'.
metadata_rolename:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
<Exceptions>
tuf.exceptions.ExpiredMetadataError:
If 'metadata_rolename' has expired.
<Side Effects>
None.
<Returns>
None.
"""
# Extract the expiration time.
expires = metadata_object['expires']
# If the current time has surpassed the expiration date, raise an
# exception. 'expires' is in
# 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA' format (e.g.,
# '1985-10-21T01:22:00Z'.) Convert it to a unix timestamp and compare it
# against the current time.time() (also in Unix/POSIX time format, although
# with microseconds attached.)
current_time = int(time.time())
# Generate a user-friendly error message if 'expires' is less than the
# current time (i.e., a local time.)
expires_datetime = iso8601.parse_date(expires)
expires_timestamp = tuf.formats.datetime_to_unix_timestamp(expires_datetime)
if expires_timestamp < current_time:
message = 'Metadata '+repr(metadata_rolename)+' expired on ' + \
expires_datetime.ctime() + ' (UTC).'
logger.error(message)
raise tuf.exceptions.ExpiredMetadataError(message)
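# A minimal sketch of the expiry check, assuming a hypothetical ISO 8601
# 'expires' value as it appears in metadata:
#
#   expires_datetime = iso8601.parse_date('1985-10-21T01:22:00Z')
#   expires_timestamp = tuf.formats.datetime_to_unix_timestamp(
#       expires_datetime)
#   expired = expires_timestamp < int(time.time())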
def all_targets(self):
"""
<Purpose>
NOTE: This function is deprecated. Its behavior with regard to which
delegating Targets roles are trusted to determine how to validate a
delegated Targets role is NOT WELL DEFINED. Please transition to use of
get_one_valid_targetinfo()!
Get a list of the target information for all the trusted targets on the
repository. This list also includes all the targets of delegated roles.
Targets of the list returned are ordered according to the trusted order of
the delegated roles, where parent roles come before children. The list
conforms to 'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}},
...]
<Arguments>
None.
<Exceptions>
tuf.exceptions.RepositoryError:
If the metadata for the 'targets' role is missing from
the 'snapshot' metadata.
tuf.exceptions.UnknownRoleError:
If one of the roles could not be found in the role database.
<Side Effects>
The metadata for target roles is updated and stored.
<Returns>
A list of targets, conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
"""
warnings.warn(
'Support for all_targets() will be removed in a future release.'
' get_one_valid_targetinfo() should be used instead.',
DeprecationWarning)
# Load the most up-to-date targets of the 'targets' role and all
# delegated roles.
self._refresh_targets_metadata(refresh_all_delegated_roles=True)
# Fetch the targets for the 'targets' role.
all_targets = self._targets_of_role('targets', skip_refresh=True)
# Fetch the targets of the delegated roles. get_rolenames returns
# all roles available on the repository.
delegated_targets = []
for role in tuf.roledb.get_rolenames(self.repository_name):
if role in ['root', 'snapshot', 'targets', 'timestamp']:
continue
else:
delegated_targets.extend(self._targets_of_role(role, skip_refresh=True))
all_targets.extend(delegated_targets)
return all_targets
def _refresh_targets_metadata(self, rolename='targets',
refresh_all_delegated_roles=False):
"""
<Purpose>
Non-public method that refreshes the targets metadata of 'rolename'. If
'refresh_all_delegated_roles' is True, include all the delegations that
follow 'rolename'. The metadata for the 'targets' role is updated in
refresh() by the _update_metadata_if_changed('targets') call, not here.
Delegated roles are not loaded when the repository is first initialized.
They are loaded from disk, updated if they have changed, and stored to
the 'self.metadata' store by this method. This method is called by
get_one_valid_targetinfo().
<Arguments>
rolename:
This is a delegated role name and should not end in '.json'. Example:
'unclaimed'.
refresh_all_delegated_roles:
Boolean indicating if all the delegated roles available in the
repository (via snapshot.json) should be refreshed.
<Exceptions>
tuf.exceptions.RepositoryError:
If the metadata file for the 'targets' role is missing from the
'snapshot' metadata.
<Side Effects>
The metadata for the delegated roles are loaded and updated if they
have changed. Delegated metadata is removed from the role database if
it has expired.
<Returns>
None.
"""
roles_to_update = []
if rolename + '.json' in self.metadata['current']['snapshot']['meta']:
roles_to_update.append(rolename)
if refresh_all_delegated_roles:
for role in six.iterkeys(self.metadata['current']['snapshot']['meta']):
# snapshot.json keeps track of root.json, targets.json, and delegated
# roles (e.g., django.json, unclaimed.json). Exclude the 'root' and
# 'targets' roles because they are updated elsewhere: 'targets' in
# _update_metadata_if_changed('targets'), and 'root' in refresh().
if role.endswith('.json'):
role = role[:-len('.json')]
if role not in ['root', 'targets', rolename]:
roles_to_update.append(role)
else:
continue
# If there is nothing to refresh, we are done.
if not roles_to_update:
return
logger.debug('Roles to update: ' + repr(roles_to_update) + '.')
# Iterate 'roles_to_update', and load and update its metadata file if it
# has changed.
for rolename in roles_to_update:
self._load_metadata_from_file('previous', rolename)
self._load_metadata_from_file('current', rolename)
self._update_metadata_if_changed(rolename)
def _targets_of_role(self, rolename, targets=None, skip_refresh=False):
"""
<Purpose>
Non-public method that returns the target information of all the targets
of 'rolename'. The returned information is a list conformant to
'tuf.formats.TARGETINFOS_SCHEMA', and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}},
...]
<Arguments>
rolename:
This is a role name and should not end in '.json'. Examples: 'targets',
'unclaimed'.
targets:
A list of targets containing target information, conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
skip_refresh:
A boolean indicating if the target metadata for 'rolename'
should be refreshed.
<Exceptions>
tuf.exceptions.UnknownRoleError:
If 'rolename' is not found in the role database.
<Side Effects>
The metadata for 'rolename' is refreshed if 'skip_refresh' is False.
<Returns>
A list of dict objects containing the target information of all the
targets of 'rolename'. Conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
"""
if targets is None:
targets = []
targets_of_role = list(targets)
logger.debug('Getting targets of role: ' + repr(rolename) + '.')
if not tuf.roledb.role_exists(rolename, self.repository_name):
raise tuf.exceptions.UnknownRoleError(rolename)
# We do not need to worry about the target paths being trusted because
# this is enforced before any new metadata is accepted.
if not skip_refresh:
self._refresh_targets_metadata(rolename)
# Do we have metadata for 'rolename'?
if rolename not in self.metadata['current']:
logger.debug('No metadata for ' + repr(rolename) + '.'
' Unable to determine targets.')
return []
# Get the targets specified by the role itself.
for filepath, fileinfo in six.iteritems(
self.metadata['current'][rolename].get('targets', {})):
new_target = {}
new_target['filepath'] = filepath
new_target['fileinfo'] = fileinfo
targets_of_role.append(new_target)
return targets_of_role
def targets_of_role(self, rolename='targets'):
"""
<Purpose>
NOTE: This function is deprecated. Use with rolename 'targets' is secure
and the behavior well-defined, but use with any delegated targets role is
not. Please transition use for delegated targets roles to
get_one_valid_targetinfo(). More information is below.
Return a list of trusted targets directly specified by 'rolename'.
The returned information is a list conformant to
'tuf.formats.TARGETINFOS_SCHEMA', and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}},
...]
The metadata of 'rolename' is updated if out of date, including the
metadata of its parent roles (i.e., the minimum roles needed to set the
chain of trust).
<Arguments>
rolename:
The name of the role whose list of targets are wanted.
The name of the role should start with 'targets'.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'rolename' is improperly formatted.
tuf.exceptions.RepositoryError:
If the metadata of 'rolename' cannot be updated.
tuf.exceptions.UnknownRoleError:
If 'rolename' is not found in the role database.
<Side Effects>
The metadata of updated delegated roles are downloaded and stored.
<Returns>
A list of targets, conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
"""
warnings.warn(
'Support for targets_of_role() will be removed in a future release.'
' get_one_valid_targetinfo() should be used instead.',
DeprecationWarning)
# Does 'rolename' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
tuf.formats.RELPATH_SCHEMA.check_match(rolename)
# If we've been given a delegated targets role, we don't know how to
# validate it without knowing what the delegating role is -- there could
# be several roles that delegate to the given role. Behavior of this
# function for roles other than Targets is not well defined as a result.
# This function is deprecated, but:
# - Usage of this function or a future successor makes sense when the
# role of interest is Targets, since we always know exactly how to
# validate Targets (We use root.).
# - Until it's removed (hopefully soon), we'll try to provide what it has
# always provided. To do this, we fetch and "validate" all delegated
# roles listed by snapshot. For delegated roles only, the order of the
# validation impacts the security of the validation -- the most-
# recently-validated role delegating to a role you are currently
# validating determines the expected keyids and threshold of the role
# you are currently validating. That is NOT GOOD. Again, please switch
# to get_one_valid_targetinfo, which is well-defined and secure.
if rolename != 'targets':
self._refresh_targets_metadata(refresh_all_delegated_roles=True)
if not tuf.roledb.role_exists(rolename, self.repository_name):
raise tuf.exceptions.UnknownRoleError(rolename)
return self._targets_of_role(rolename, skip_refresh=True)
def get_one_valid_targetinfo(self, target_filepath):
"""
<Purpose>
Return the target information for 'target_filepath', and update its
corresponding metadata, if necessary. 'target_filepath' must match
exactly as it appears in metadata, and should not contain URL encoding
escapes.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'target_filepath' is improperly formatted.
tuf.exceptions.UnknownTargetError:
If 'target_filepath' was not found.
Any other unforeseen runtime exception.
<Side Effects>
The metadata for updated delegated roles are downloaded and stored.
<Returns>
The target information for 'target_filepath', conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
"""
# Does 'target_filepath' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
tuf.formats.RELPATH_SCHEMA.check_match(target_filepath)
target_filepath = target_filepath.replace('\\', '/')
if target_filepath.startswith('/'):
raise tuf.exceptions.FormatError('The requested target file cannot'
' contain a leading path separator: ' + repr(target_filepath))
# Get target by looking at roles in order of priority tags.
target = self._preorder_depth_first_walk(target_filepath)
# Raise an exception if the target information could not be retrieved.
if target is None:
logger.error(repr(target_filepath) + ' not found.')
raise tuf.exceptions.UnknownTargetError(repr(target_filepath) + ' not'
' found.')
# Otherwise, return the found target.
else:
return target
def _preorder_depth_first_walk(self, target_filepath):
"""
<Purpose>
Non-public method that interrogates the tree of target delegations in
order of appearance (which implicitly orders trustworthiness), and returns
the matching target found in the most trusted role.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'target_filepath' is improperly formatted.
tuf.exceptions.RepositoryError:
If 'target_filepath' is not found.
<Side Effects>
The metadata for updated delegated roles are downloaded and stored.
<Returns>
The target information for 'target_filepath', conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
"""
target = None
current_metadata = self.metadata['current']
role_names = ['targets']
visited_role_names = set()
number_of_delegations = tuf.settings.MAX_NUMBER_OF_DELEGATIONS
# Ensure the client has the most up-to-date version of 'targets.json'.
# Raise 'tuf.exceptions.NoWorkingMirrorError' if the changed metadata
# cannot be successfully downloaded and 'tuf.exceptions.RepositoryError' if
# the referenced metadata is missing. Target methods such as this one are
# called after the top-level metadata have been refreshed (i.e.,
# updater.refresh()).
self._update_metadata_if_changed('targets')
# Preorder depth-first traversal of the graph of target delegations.
while target is None and number_of_delegations > 0 and len(role_names) > 0:
# Pop the role name from the top of the stack.
role_name = role_names.pop(-1)
# Skip any visited current role to prevent cycles.
if role_name in visited_role_names:
logger.debug('Skipping visited current role ' + repr(role_name))
continue
# The metadata for 'role_name' must be downloaded/updated before its
# targets, delegations, and child roles can be inspected.
# self.metadata['current'][role_name] is currently missing.
# _refresh_targets_metadata() does not refresh 'targets.json', it
# expects _update_metadata_if_changed() to have already refreshed it,
# which this function has checked above.
self._refresh_targets_metadata(role_name,
refresh_all_delegated_roles=False)
role_metadata = current_metadata[role_name]
targets = role_metadata['targets']
delegations = role_metadata.get('delegations', {})
child_roles = delegations.get('roles', [])
target = self._get_target_from_targets_role(role_name, targets,
target_filepath)
# After preorder check, add current role to set of visited roles.
visited_role_names.add(role_name)
# Also decrement the number of remaining delegations allowed.
number_of_delegations -= 1
if target is None:
child_roles_to_visit = []
# NOTE: This may be a slow operation if there are many delegated roles.
for child_role in child_roles:
child_role_name = self._visit_child_role(child_role, target_filepath)
if child_role['terminating'] and child_role_name is not None:
logger.debug('Adding child role ' + repr(child_role_name))
logger.debug('Not backtracking to other roles.')
role_names = []
child_roles_to_visit.append(child_role_name)
break
elif child_role_name is None:
logger.debug('Skipping child role ' + repr(child_role['name']))
else:
logger.debug('Adding child role ' + repr(child_role_name))
child_roles_to_visit.append(child_role_name)
# Push 'child_roles_to_visit' in reverse order of appearance onto
# 'role_names'. Roles are popped from the end of the 'role_names'
# list.
child_roles_to_visit.reverse()
role_names.extend(child_roles_to_visit)
else:
logger.debug('Found target in current role ' + repr(role_name))
if target is None and number_of_delegations == 0 and len(role_names) > 0:
logger.debug(repr(len(role_names)) + ' roles left to visit, ' +
'but allowed to visit at most ' +
repr(tuf.settings.MAX_NUMBER_OF_DELEGATIONS) + ' delegations.')
return target
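# Stripped of the metadata refreshing, the traversal above is a bounded,
# cycle-safe preorder depth-first search over delegations. A sketch, where
# 'children(role)', 'has_target(role)', and 'lookup(role)' are hypothetical
# accessors over already-verified metadata:
#
#   stack, visited = ['targets'], set()
#   budget = tuf.settings.MAX_NUMBER_OF_DELEGATIONS
#   while stack and budget > 0:
#     role = stack.pop()
#     if role in visited:
#       continue
#     visited.add(role)
#     budget -= 1
#     if has_target(role):
#       return lookup(role)
#     stack.extend(reversed(children(role)))  # preserve listed priority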
def _get_target_from_targets_role(self, role_name, targets, target_filepath):
"""
<Purpose>
Non-public method that determines whether the targets role with the given
'role_name' has the target with the name 'target_filepath'.
<Arguments>
role_name:
The name of the targets role that we are inspecting.
targets:
The targets of the Targets role with the name 'role_name'.
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The target information for 'target_filepath', conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
"""
# Does the current role name have our target?
logger.debug('Asking role ' + repr(role_name) + ' about'
' target ' + repr(target_filepath))
target = targets.get(target_filepath)
if target:
logger.debug('Found target ' + target_filepath + ' in role ' + role_name)
return {'filepath': target_filepath, 'fileinfo': target}
else:
logger.debug(
'Target file ' + target_filepath + ' not found in role ' + role_name)
return None
def _visit_child_role(self, child_role, target_filepath):
"""
<Purpose>
Non-public method that determines whether the given 'target_filepath'
is an allowed path of 'child_role'.
Ensure that we explore only delegated roles trusted with the target. The
metadata for 'child_role' should have been refreshed prior to this point,
however, the paths/targets that 'child_role' signs for have not been
verified (as intended). The paths/targets that 'child_role' is allowed
to specify in its metadata depends on the delegating role, and thus is
left to the caller to verify. We verify here that 'target_filepath'
is an allowed path according to the delegated 'child_role'.
TODO: Should the TUF spec restrict the repository to one particular
algorithm? Should we allow the repository to specify in the role
dictionary the algorithm used for these generated hashed paths?
<Arguments>
child_role:
The delegation targets role object of 'child_role', containing its
paths, path_hash_prefixes, keys, and so on.
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
If 'child_role' has been delegated the target with the name
'target_filepath', then we return the role name of 'child_role'.
Otherwise, we return None.
"""
child_role_name = child_role['name']
child_role_paths = child_role.get('paths')
child_role_path_hash_prefixes = child_role.get('path_hash_prefixes')
if child_role_path_hash_prefixes is not None:
target_filepath_hash = self._get_target_hash(target_filepath)
for child_role_path_hash_prefix in child_role_path_hash_prefixes:
if target_filepath_hash.startswith(child_role_path_hash_prefix):
return child_role_name
else:
continue
elif child_role_paths is not None:
# Is 'child_role_name' allowed to sign for 'target_filepath'?
for child_role_path in child_role_paths:
# A child role path may be an explicit path or glob pattern (Unix
# shell-style wildcards). The child role 'child_role_name' is returned
# if 'target_filepath' is equal to or matches 'child_role_path'.
# Explicit filepaths are also considered matches. A repo maintainer
# might delegate a glob pattern with a leading path separator, while
# the client requests a matching target without a leading path
# separator - make sure to strip any leading path separators so that a
# match is made. Example: "foo.tgz" should match with "/*.tgz".
if fnmatch.fnmatch(target_filepath.lstrip(os.sep), child_role_path.lstrip(os.sep)):
logger.debug('Child role ' + repr(child_role_name) + ' is allowed to'
' sign for ' + repr(target_filepath))
return child_role_name
else:
logger.debug(
'The given target path ' + repr(target_filepath) + ' does not'
' match the trusted path or glob pattern: ' + repr(child_role_path))
continue
else:
# 'child_role_name' should have been validated when its metadata was
# downloaded. The 'paths' and 'path_hash_prefixes' fields should not both
# be missing, so we raise a format error here in case they are.
raise securesystemslib.exceptions.FormatError(repr(child_role_name) + ' '
'has neither a "paths" nor "path_hash_prefixes". At least'
' one of these attributes must be present.')
return None
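# Illustrative sketch (not part of the original module): how the
# lstrip(os.sep) normalization above lets a client-requested path match a
# delegated glob pattern written with a leading separator.
def _matches_delegation_sketch(target_filepath, child_role_path):
    """Return True if the target path matches the delegated path/glob."""
    import fnmatch
    import os
    return fnmatch.fnmatch(
        target_filepath.lstrip(os.sep), child_role_path.lstrip(os.sep))
# _matches_delegation_sketch('foo.tgz', '/*.tgz')      -> True
# _matches_delegation_sketch('/pkg/foo.tgz', 'pkg/*')  -> True
# _matches_delegation_sketch('foo.txt', '/*.tgz')      -> False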
def _get_target_hash(self, target_filepath, hash_function='sha256'):
"""
<Purpose>
Non-public method that computes the hash of 'target_filepath'. This is
useful in conjunction with the "path_hash_prefixes" attribute in a
delegated targets role, which tells us which paths it is implicitly
responsible for.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
hash_function:
The algorithm used by the repository to generate the hashes of the
target filepaths. The repository may optionally organize targets into
hashed bins to ease target delegations and role metadata management.
The use of consistent hashing allows for a uniform distribution of
targets into bins.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The hash of 'target_filepath'.
"""
# Calculate the hash of the filepath to determine which bin contains the
# target. The client currently assumes the repository (i.e., the repository
# tool) uses 'hash_function' and UTF-8 encoding to generate these hashes.
digest_object = securesystemslib.hash.digest(hash_function)
encoded_target_filepath = target_filepath.encode('utf-8')
digest_object.update(encoded_target_filepath)
target_filepath_hash = digest_object.hexdigest()
return target_filepath_hash
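# Illustrative sketch (not part of the original module): the same computation
# with only the standard library, assuming SHA-256 as above. Repositories
# using hashed bins compare this digest against the 'path_hash_prefixes'
# entries of each delegated role to route a target path to its bin.
def _target_hash_sketch(target_filepath, hash_function='sha256'):
    import hashlib
    digest = hashlib.new(hash_function)
    digest.update(target_filepath.encode('utf-8'))
    return digest.hexdigest()
# The result can then be tested with str.startswith() against a delegated
# role's hash prefixes, as _visit_child_role() does above.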
def remove_obsolete_targets(self, destination_directory):
"""
<Purpose>
Remove any files that are in 'previous' but not 'current', so that a file
removed from the repository is also removed locally. The targets of the
'targets' role and of all delegated roles are checked.
<Arguments>
destination_directory:
The directory containing the target files tracked by TUF.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'destination_directory' is improperly formatted.
tuf.exceptions.RepositoryError:
If an error occurred removing any files.
<Side Effects>
Target files are removed from disk.
<Returns>
None.
"""
# Does 'destination_directory' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(destination_directory)
# Iterate the rolenames and verify whether the 'previous' directory
# contains a target no longer found in 'current'.
for role in tuf.roledb.get_rolenames(self.repository_name):
if role.startswith('targets'):
if role in self.metadata['previous'] and self.metadata['previous'][role] is not None:
for target in self.metadata['previous'][role]['targets']:
if target not in self.metadata['current'][role]['targets']:
# 'target' is only in 'previous', so remove it.
logger.warning('Removing obsolete file: ' + repr(target) + '.')
# Remove the file if it hasn't been removed already.
destination = \
os.path.join(destination_directory, target.lstrip(os.sep))
try:
os.remove(destination)
except OSError as e:
# If the file was already removed, just log it.
if e.errno == errno.ENOENT:
logger.info('File ' + repr(destination) + ' was already'
' removed.')
else:
logger.error(str(e))
else:
logger.debug('Skipping: ' + repr(target) + '. It is still'
' a current target.')
else:
logger.debug('Skipping: ' + repr(role) + '. Not in the previous'
' metadata')
def updated_targets(self, targets, destination_directory):
"""
<Purpose>
Checks files in the provided directory against the provided file metadata.
Filters the provided target info, returning a subset: only the metadata
for targets for which the target file either does not exist in the
provided directory, or for which the target file in the provided directory
does not match the provided metadata.
A principal use of this function is to determine which target files need
to be downloaded. If the caller first uses get_one_valid_target_info()
calls to obtain up-to-date, valid metadata for targets, the caller can
then call updated_targets() to determine if that metadata does not match
what exists already on disk (in the provided directory). The returned
values can then be used in download_file() calls to update the files that
didn't exist or didn't match.
The returned information is a list conformant to
'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}
...]
<Arguments>
targets:
Metadata about the expected state of target files, against which local
files will be checked. This should be a list of target info
dictionaries; i.e. 'targets' must be conformant to
tuf.formats.TARGETINFOS_SCHEMA.
destination_directory:
The directory containing the target files.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
<Side Effects>
The files in 'targets' are read and their hashes computed.
<Returns>
A list of target info dictionaries. The list conforms to
'tuf.formats.TARGETINFOS_SCHEMA'.
This is a strict subset of the argument 'targets'.
"""
# Do the arguments have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
tuf.formats.TARGETINFOS_SCHEMA.check_match(targets)
securesystemslib.formats.PATH_SCHEMA.check_match(destination_directory)
# Keep track of the target objects and filepaths of updated targets.
# Return 'updated_targets' and use 'updated_targetpaths' to avoid
# duplicates.
updated_targets = []
updated_targetpaths = []
for target in targets:
# Prepend 'destination_directory' to the target's relative filepath (as
# stored in metadata.) Verify the hash of 'target_filepath' against
# each hash listed for its fileinfo. Note: join() discards
# 'destination_directory' if 'filepath' contains a leading path separator
# (i.e., is treated as an absolute path).
filepath = target['filepath']
if filepath[0] == '/':
filepath = filepath[1:]
target_filepath = os.path.join(destination_directory, filepath)
if target_filepath in updated_targetpaths:
continue
# Check each algorithm/digest pair for a mismatch. We break as soon
# as we find one.
for algorithm, digest in six.iteritems(target['fileinfo']['hashes']):
digest_object = None
try:
digest_object = securesystemslib.hash.digest_filename(target_filepath,
algorithm=algorithm)
# This exception would occur if the target does not exist locally.
except IOError:
updated_targets.append(target)
updated_targetpaths.append(target_filepath)
break
# The file does exist locally, check if its hash differs.
if digest_object.hexdigest() != digest:
updated_targets.append(target)
updated_targetpaths.append(target_filepath)
break
return updated_targets
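# Illustrative sketch (not part of the original module): the per-file check
# above, reduced to the standard library. A file is "updated" (needs a
# download) if it is missing locally or any trusted digest disagrees with
# the local content.
def _needs_download_sketch(local_path, trusted_hashes):
    """trusted_hashes maps an algorithm name to the expected hex digest."""
    import hashlib
    for algorithm, expected in trusted_hashes.items():
        try:
            with open(local_path, 'rb') as f:
                digest = hashlib.new(algorithm, f.read()).hexdigest()
        except IOError:
            return True  # file absent locally
        if digest != expected:
            return True  # content mismatch
    return False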
def download_target(self, target, destination_directory):
"""
<Purpose>
Download 'target' and verify it is trusted.
This will only store the file at 'destination_directory' if the
downloaded file matches the description of the file in the trusted
metadata.
<Arguments>
target:
The target to be downloaded. Conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
destination_directory:
The directory to save the downloaded target file.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'target' is not properly formatted.
tuf.exceptions.NoWorkingMirrorError:
If a target could not be downloaded from any of the mirrors.
Although expected to be rare, there might be OSError exceptions (except
errno.EEXIST) raised when creating the destination directory (if it
doesn't exist).
<Side Effects>
A target file is saved to the local system.
<Returns>
None.
"""
# Do the arguments have the correct format?
# This check ensures the arguments have the appropriate
# number of objects and object types, and that all dict
# keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
tuf.formats.TARGETINFO_SCHEMA.check_match(target)
securesystemslib.formats.PATH_SCHEMA.check_match(destination_directory)
# Extract the target file information.
target_filepath = target['filepath']
trusted_length = target['fileinfo']['length']
trusted_hashes = target['fileinfo']['hashes']
# '_get_target_file()' checks every mirror and returns the first target
# that passes verification.
target_file_object = self._get_target_file(target_filepath, trusted_length,
trusted_hashes)
# We acquired a target file object from a mirror. Move the file into place
# (i.e., locally to 'destination_directory'). Note: join() discards
# 'destination_directory' if 'target_path' contains a leading path
# separator (i.e., is treated as an absolute path).
destination = os.path.join(destination_directory,
target_filepath.lstrip(os.sep))
destination = os.path.abspath(destination)
target_dirpath = os.path.dirname(destination)
# When attempting to create the leaf directory of 'target_dirpath', ignore
# the exception raised if the directory already exists. All other
# exceptions potentially thrown by os.makedirs() are re-raised.
# Note: os.makedirs can raise OSError if the leaf directory already exists
# or cannot be created.
try:
os.makedirs(target_dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
target_file_object.move(destination)
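# Illustrative note (not part of the original module): the EEXIST handling
# above predates Python 3.2; on modern Python the same behavior is simply:
def _ensure_directory_sketch(target_dirpath):
    import os
    os.makedirs(target_dirpath, exist_ok=True)  # tolerate an existing dir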
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4126_0 |
crossvul-python_data_bad_4211_1 | import asyncio
import contextlib
import logging
from datetime import datetime, timedelta, timezone
from typing import Optional, Tuple, Union
import discord
from redbot.core import commands, i18n, checks, modlog
from redbot.core.commands import UserInputOptional
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import (
pagify,
humanize_number,
bold,
humanize_list,
format_perms_list,
)
from redbot.core.utils.mod import get_audit_reason
from .abc import MixinMeta
from .converters import RawUserIds
from .utils import is_allowed_by_hierarchy
log = logging.getLogger("red.mod")
_ = i18n.Translator("Mod", __file__)
class KickBanMixin(MixinMeta):
"""
Kick and ban commands and tasks go here.
"""
@staticmethod
async def get_invite_for_reinvite(ctx: commands.Context, max_age: int = 86400):
"""Handles the reinvite logic for getting an invite
to send the newly unbanned user
:returns: :class:`Invite`"""
guild = ctx.guild
my_perms: discord.Permissions = guild.me.guild_permissions
if my_perms.manage_guild or my_perms.administrator:
if "VANITY_URL" in guild.features:
# guild has a vanity url so use it as the one to send
return await guild.vanity_invite()
invites = await guild.invites()
else:
invites = []
for inv in invites: # Loop through the invites for the guild
if not (inv.max_uses or inv.max_age or inv.temporary):
# Invite is for the guild's default channel,
# has unlimited uses, doesn't expire, and
# doesn't grant temporary membership
# (i.e. they won't be kicked on disconnect)
return inv
else: # No existing invite found that is valid
channels_and_perms = zip(
guild.text_channels, map(guild.me.permissions_in, guild.text_channels)
)
channel = next(
(channel for channel, perms in channels_and_perms if perms.create_instant_invite),
None,
)
if channel is None:
return
try:
# Create invite that expires after max_age
return await channel.create_invite(max_age=max_age)
except discord.HTTPException:
return
@staticmethod
async def _voice_perm_check(
ctx: commands.Context, user_voice_state: Optional[discord.VoiceState], **perms: bool
) -> bool:
"""Check if the bot and user have sufficient permissions for voicebans.
This also verifies that the user's voice state and connected
channel are not ``None``.
Returns
-------
bool
``True`` if the permissions are sufficient and the user has
a valid voice state.
"""
if user_voice_state is None or user_voice_state.channel is None:
await ctx.send(_("That user is not in a voice channel."))
return False
voice_channel: discord.VoiceChannel = user_voice_state.channel
required_perms = discord.Permissions()
required_perms.update(**perms)
if not voice_channel.permissions_for(ctx.me) >= required_perms:
await ctx.send(
_("I require the {perms} permission(s) in that user's channel to do that.").format(
perms=format_perms_list(required_perms)
)
)
return False
if (
ctx.permission_state is commands.PermState.NORMAL
and not voice_channel.permissions_for(ctx.author) >= required_perms
):
await ctx.send(
_(
"You must have the {perms} permission(s) in that user's channel to use this "
"command."
).format(perms=format_perms_list(required_perms))
)
return False
return True
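# Illustrative sketch (not part of this cog): the permission test used in
# _voice_perm_check(). discord.Permissions supports superset comparison, so
# "channel perms >= required" is true only when every required flag is set.
def _has_required_perms_sketch(channel_perms, **perms):
    import discord
    required = discord.Permissions()
    required.update(**perms)  # e.g. move_members=True, mute_members=True
    return channel_perms >= required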
async def ban_user(
self,
user: Union[discord.Member, discord.User, discord.Object],
ctx: commands.Context,
days: int = 0,
reason: str = None,
create_modlog_case=False,
) -> Tuple[bool, str]:
author = ctx.author
guild = ctx.guild
removed_temp = False
if not (0 <= days <= 7):
return False, _("Invalid days. Must be between 0 and 7.")
if isinstance(user, discord.Member):
if author == user:
return (
False,
_("I cannot let you do that. Self-harm is bad {}").format("\N{PENSIVE FACE}"),
)
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
return (
False,
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
),
)
elif guild.me.top_role <= user.top_role or user == guild.owner:
return False, _("I cannot do that due to Discord hierarchy rules.")
toggle = await self.config.guild(guild).dm_on_kickban()
if toggle:
with contextlib.suppress(discord.HTTPException):
em = discord.Embed(
title=bold(_("You have been banned from {guild}.").format(guild=guild))
)
em.add_field(
name=_("**Reason**"),
value=reason if reason is not None else _("No reason was given."),
inline=False,
)
await user.send(embed=em)
ban_type = "ban"
else:
tempbans = await self.config.guild(guild).current_tempbans()
ban_list = [ban.user.id for ban in await guild.bans()]
if user.id in ban_list:
if user.id in tempbans:
async with self.config.guild(guild).current_tempbans() as tempbans:
tempbans.remove(user.id)
removed_temp = True
else:
return (
False,
_("User with ID {user_id} is already banned.").format(user_id=user.id),
)
ban_type = "hackban"
audit_reason = get_audit_reason(author, reason)
queue_entry = (guild.id, user.id)
if removed_temp:
log.info(
"{}({}) upgraded the tempban for {} to a permaban.".format(
author.name, author.id, user.id
)
)
success_message = _(
"User with ID {user_id} was upgraded from a temporary to a permanent ban."
).format(user_id=user.id)
else:
username = user.name if hasattr(user, "name") else "Unknown"
try:
await guild.ban(user, reason=audit_reason, delete_message_days=days)
log.info(
"{}({}) {}ned {}({}), deleting {} days worth of messages.".format(
author.name, author.id, ban_type, username, user.id, str(days)
)
)
success_message = _("Done. That felt good.")
except discord.Forbidden:
return False, _("I'm not allowed to do that.")
except discord.NotFound:
return False, _("User with ID {user_id} not found").format(user_id=user.id)
except Exception as e:
log.exception(
"{}({}) attempted to {} {}({}), but an error occurred.".format(
author.name, author.id, ban_type, username, user.id
)
)
return False, _("An unexpected error occurred.")
if create_modlog_case:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
ban_type,
user,
author,
reason,
until=None,
channel=None,
)
return True, success_message
async def check_tempban_expirations(self):
while self == self.bot.get_cog("Mod"):
async for guild in AsyncIter(self.bot.guilds, steps=100):
if not guild.me.guild_permissions.ban_members:
continue
if await self.bot.cog_disabled_in_guild(self, guild):
continue
async with self.config.guild(guild).current_tempbans() as guild_tempbans:
for uid in guild_tempbans.copy():
unban_time = datetime.fromtimestamp(
await self.config.member_from_ids(guild.id, uid).banned_until(),
timezone.utc,
)
if datetime.now(timezone.utc) > unban_time: # Time to unban the user
queue_entry = (guild.id, uid)
try:
await guild.unban(
discord.Object(id=uid), reason=_("Tempban finished")
)
except discord.NotFound:
# user is not banned anymore
guild_tempbans.remove(uid)
except discord.HTTPException as e:
# 50013: Missing permissions error code or 403: Forbidden status
if e.code == 50013 or e.status == 403:
log.info(
f"Failed to unban ({uid}) user from "
f"{guild.name}({guild.id}) guild due to permissions."
)
break # skip the rest of this guild
log.info(f"Failed to unban member: error code: {e.code}")
else:
# user unbanned successfully
guild_tempbans.remove(uid)
await asyncio.sleep(60)
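# Illustrative sketch (not part of this cog): the expiry test used in the
# loop above, reduced to the standard library. Ban expirations are stored
# as POSIX timestamps and compared in UTC.
def _tempban_expired_sketch(banned_until_timestamp):
    from datetime import datetime, timezone
    unban_time = datetime.fromtimestamp(banned_until_timestamp, timezone.utc)
    return datetime.now(timezone.utc) > unban_time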
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(kick_members=True)
@checks.admin_or_permissions(kick_members=True)
async def kick(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Kick a user.
If a reason is specified, it will be the reason that shows up
in the audit log.
"""
author = ctx.author
guild = ctx.guild
if author == user:
await ctx.send(
_("I cannot let you do that. Self-harm is bad {emoji}").format(
emoji="\N{PENSIVE FACE}"
)
)
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
elif ctx.guild.me.top_role <= user.top_role or user == ctx.guild.owner:
await ctx.send(_("I cannot do that due to Discord hierarchy rules."))
return
audit_reason = get_audit_reason(author, reason)
toggle = await self.config.guild(guild).dm_on_kickban()
if toggle:
with contextlib.suppress(discord.HTTPException):
em = discord.Embed(
title=bold(_("You have been kicked from {guild}.").format(guild=guild))
)
em.add_field(
name=_("**Reason**"),
value=reason if reason is not None else _("No reason was given."),
inline=False,
)
await user.send(embed=em)
try:
await guild.kick(user, reason=audit_reason)
log.info("{}({}) kicked {}({})".format(author.name, author.id, user.name, user.id))
except discord.errors.Forbidden:
await ctx.send(_("I'm not allowed to do that."))
except Exception as e:
log.exception(
"{}({}) attempted to kick {}({}), but an error occurred.".format(
author.name, author.id, user.name, user.id
)
)
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"kick",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("Done. That felt good."))
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def ban(
self,
ctx: commands.Context,
user: Union[discord.Member, RawUserIds],
days: Optional[int] = None,
*,
reason: str = None,
):
"""Ban a user from this server and optionally delete days of messages.
A user ID should be provided if the user is not a member of this server.
If days is not a number, it's treated as the first word of the reason.
Minimum 0 days, maximum 7. If not specified, defaultdays setting will be used instead."""
author = ctx.author
guild = ctx.guild
if days is None:
days = await self.config.guild(guild).default_days()
if isinstance(user, int):
user = self.bot.get_user(user) or discord.Object(id=user)
success_, message = await self.ban_user(
user=user, ctx=ctx, days=days, reason=reason, create_modlog_case=True
)
await ctx.send(message)
@commands.command(aliases=["hackban"])
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def massban(
self,
ctx: commands.Context,
user_ids: commands.Greedy[RawUserIds],
days: Optional[int] = None,
*,
reason: str = None,
):
"""Mass bans user(s) from the server.
User IDs need to be provided in order to ban
using this command."""
banned = []
errors = {}
upgrades = []
async def show_results():
text = _("Banned {num} users from the server.").format(
num=humanize_number(len(banned))
)
if errors:
text += _("\nErrors:\n")
text += "\n".join(errors.values())
if upgrades:
text += _(
"\nFollowing user IDs have been upgraded from a temporary to a permanent ban:\n"
)
text += humanize_list(upgrades)
for p in pagify(text):
await ctx.send(p)
def remove_processed(ids):
return [_id for _id in ids if _id not in banned and _id not in errors]
user_ids = list(set(user_ids)) # No dupes
author = ctx.author
guild = ctx.guild
if not user_ids:
await ctx.send_help()
return
if days is None:
days = await self.config.guild(guild).default_days()
if not (0 <= days <= 7):
await ctx.send(_("Invalid days. Must be between 0 and 7."))
return
if not guild.me.guild_permissions.ban_members:
return await ctx.send(_("I lack the permissions to do this."))
tempbans = await self.config.guild(guild).current_tempbans()
ban_list = await guild.bans()
for entry in ban_list:
for user_id in user_ids:
if entry.user.id == user_id:
if user_id in tempbans:
# We need to check if a user is tempbanned here because otherwise they won't be processed later on.
continue
else:
errors[user_id] = _("User with ID {user_id} is already banned.").format(
user_id=user_id
)
user_ids = remove_processed(user_ids)
if not user_ids:
await show_results()
return
for user_id in user_ids:
user = guild.get_member(user_id)
if user is not None:
if user_id in tempbans:
# We need to check if a user is tempbanned here because otherwise they won't be processed later on.
continue
else:
# Reuse ban_user() instead of replicating all of its checks and handling here.
try:
success, reason = await self.ban_user(
user=user, ctx=ctx, days=days, reason=reason, create_modlog_case=True
)
if success:
banned.append(user_id)
else:
errors[user_id] = _("Failed to ban user {user_id}: {reason}").format(
user_id=user_id, reason=reason
)
except Exception as e:
errors[user_id] = _("Failed to ban user {user_id}: {reason}").format(
user_id=user_id, reason=e
)
user_ids = remove_processed(user_ids)
if not user_ids:
await show_results()
return
for user_id in user_ids:
user = discord.Object(id=user_id)
audit_reason = get_audit_reason(author, reason)
queue_entry = (guild.id, user_id)
async with self.config.guild(guild).current_tempbans() as tempbans:
if user_id in tempbans:
tempbans.remove(user_id)
upgrades.append(str(user_id))
log.info(
"{}({}) upgraded the tempban for {} to a permaban.".format(
author.name, author.id, user_id
)
)
banned.append(user_id)
else:
try:
await guild.ban(user, reason=audit_reason, delete_message_days=days)
log.info("{}({}) hackbanned {}".format(author.name, author.id, user_id))
except discord.NotFound:
errors[user_id] = _("User with ID {user_id} not found").format(
user_id=user_id
)
continue
except discord.Forbidden:
errors[user_id] = _(
"Could not ban user with ID {user_id}: missing permissions."
).format(user_id=user_id)
continue
else:
banned.append(user_id)
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"hackban",
user_id,
author,
reason,
until=None,
channel=None,
)
await show_results()
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def tempban(
self,
ctx: commands.Context,
user: discord.Member,
duration: Optional[commands.TimedeltaConverter] = None,
days: Optional[int] = None,
*,
reason: str = None,
):
"""Temporarily ban a user from this server."""
guild = ctx.guild
author = ctx.author
if author == user:
await ctx.send(
_("I cannot let you do that. Self-harm is bad {}").format("\N{PENSIVE FACE}")
)
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
elif guild.me.top_role <= user.top_role or user == guild.owner:
await ctx.send(_("I cannot do that due to Discord hierarchy rules."))
return
if duration is None:
duration = timedelta(seconds=await self.config.guild(guild).default_tempban_duration())
unban_time = datetime.now(timezone.utc) + duration
if days is None:
days = await self.config.guild(guild).default_days()
if not (0 <= days <= 7):
await ctx.send(_("Invalid days. Must be between 0 and 7."))
return
invite = await self.get_invite_for_reinvite(ctx, int(duration.total_seconds() + 86400))
if invite is None:
invite = ""
queue_entry = (guild.id, user.id)
await self.config.member(user).banned_until.set(unban_time.timestamp())
async with self.config.guild(guild).current_tempbans() as current_tempbans:
current_tempbans.append(user.id)
with contextlib.suppress(discord.HTTPException):
# We don't want blocked DMs preventing us from banning
msg = _("You have been temporarily banned from {server_name} until {date}.").format(
server_name=guild.name, date=unban_time.strftime("%m-%d-%Y %H:%M:%S")
)
if invite:
msg += _(" Here is an invite for when your ban expires: {invite_link}").format(
invite_link=invite
)
await user.send(msg)
try:
await guild.ban(user, reason=reason, delete_message_days=days)
except discord.Forbidden:
await ctx.send(_("I can't do that for some reason."))
except discord.HTTPException:
await ctx.send(_("Something went wrong while banning."))
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"tempban",
user,
author,
reason,
unban_time,
)
await ctx.send(_("Done. Enough chaos for now."))
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def softban(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Kick a user and delete 1 day's worth of their messages."""
guild = ctx.guild
author = ctx.author
if author == user:
await ctx.send(
_("I cannot let you do that. Self-harm is bad {emoji}").format(
emoji="\N{PENSIVE FACE}"
)
)
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, user):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
audit_reason = get_audit_reason(author, reason)
invite = await self.get_invite_for_reinvite(ctx)
if invite is None:
invite = ""
queue_entry = (guild.id, user.id)
try: # We don't want blocked DMs preventing us from banning
msg = await user.send(
_(
"You have been banned and "
"then unbanned as a quick way to delete your messages.\n"
"You can now join the server again. {invite_link}"
).format(invite_link=invite)
)
except discord.HTTPException:
msg = None
try:
await guild.ban(user, reason=audit_reason, delete_message_days=1)
except discord.errors.Forbidden:
await ctx.send(_("My role is not high enough to softban that user."))
if msg is not None:
await msg.delete()
return
except discord.HTTPException as e:
log.exception(
"{}({}) attempted to softban {}({}), but an error occurred trying to ban them.".format(
author.name, author.id, user.name, user.id
)
)
return
try:
await guild.unban(user)
except discord.HTTPException as e:
log.exception(
"{}({}) attempted to softban {}({}), but an error occurred trying to unban them.".format(
author.name, author.id, user.name, user.id
)
)
return
else:
log.info(
"{}({}) softbanned {}({}), deleting 1 day worth "
"of messages.".format(author.name, author.id, user.name, user.id)
)
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"softban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("Done. Enough chaos."))
@commands.command()
@commands.guild_only()
@commands.mod_or_permissions(move_members=True)
async def voicekick(
self, ctx: commands.Context, member: discord.Member, *, reason: str = None
):
"""Kick a member from a voice channel."""
author = ctx.author
guild = ctx.guild
user_voice_state: discord.VoiceState = member.voice
if await self._voice_perm_check(ctx, user_voice_state, move_members=True) is False:
return
elif not await is_allowed_by_hierarchy(self.bot, self.config, guild, author, member):
await ctx.send(
_(
"I cannot let you do that. You are "
"not higher than the user in the role "
"hierarchy."
)
)
return
case_channel = member.voice.channel
# Store this channel for the case channel.
try:
await member.move_to(None)
except discord.Forbidden: # Very unlikely that this will ever occur
await ctx.send(_("I am unable to kick this member from the voice channel."))
return
except discord.HTTPException:
await ctx.send(_("Something went wrong while attempting to kick that member."))
return
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"vkick",
member,
author,
reason,
until=None,
channel=case_channel,
)
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(mute_members=True, deafen_members=True)
async def voiceunban(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Unban a user from speaking and listening in the server's voice channels."""
user_voice_state = user.voice
if (
await self._voice_perm_check(
ctx, user_voice_state, deafen_members=True, mute_members=True
)
is False
):
return
needs_unmute = bool(user_voice_state.mute)
needs_undeafen = bool(user_voice_state.deaf)
audit_reason = get_audit_reason(ctx.author, reason)
if needs_unmute and needs_undeafen:
await user.edit(mute=False, deafen=False, reason=audit_reason)
elif needs_unmute:
await user.edit(mute=False, reason=audit_reason)
elif needs_undeafen:
await user.edit(deafen=False, reason=audit_reason)
else:
await ctx.send(_("That user isn't muted or deafened by the server."))
return
guild = ctx.guild
author = ctx.author
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"voiceunban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("User is now allowed to speak and listen in voice channels."))
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(mute_members=True, deafen_members=True)
async def voiceban(self, ctx: commands.Context, user: discord.Member, *, reason: str = None):
"""Ban a user from speaking and listening in the server's voice channels."""
user_voice_state: discord.VoiceState = user.voice
if (
await self._voice_perm_check(
ctx, user_voice_state, deafen_members=True, mute_members=True
)
is False
):
return
needs_mute = not user_voice_state.mute
needs_deafen = not user_voice_state.deaf
audit_reason = get_audit_reason(ctx.author, reason)
author = ctx.author
guild = ctx.guild
if needs_mute and needs_deafen:
await user.edit(mute=True, deafen=True, reason=audit_reason)
elif needs_mute:
await user.edit(mute=True, reason=audit_reason)
elif needs_deafen:
await user.edit(deafen=True, reason=audit_reason)
else:
await ctx.send(_("That user is already muted and deafened server-wide."))
return
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"voiceban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("User has been banned from speaking or listening in voice channels."))
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
async def unban(self, ctx: commands.Context, user_id: RawUserIds, *, reason: str = None):
"""Unban a user from this server.
Requires specifying the target user's ID. To find this, you may either:
1. Copy it from the mod log case (if one was created), or
2. Enable developer mode, go to Bans in this server's settings, right-
click the user and select 'Copy ID'."""
guild = ctx.guild
author = ctx.author
audit_reason = get_audit_reason(ctx.author, reason)
bans = await guild.bans()
bans = [be.user for be in bans]
user = discord.utils.get(bans, id=user_id)
if not user:
await ctx.send(_("It seems that user isn't banned!"))
return
queue_entry = (guild.id, user_id)
try:
await guild.unban(user, reason=audit_reason)
except discord.HTTPException:
await ctx.send(_("Something went wrong while attempting to unban that user."))
return
else:
await modlog.create_case(
self.bot,
guild,
ctx.message.created_at.replace(tzinfo=timezone.utc),
"unban",
user,
author,
reason,
until=None,
channel=None,
)
await ctx.send(_("Unbanned that user from this server."))
if await self.config.guild(guild).reinvite_on_unban():
user = ctx.bot.get_user(user_id)
if not user:
await ctx.send(
_("I don't share another server with this user. I can't reinvite them.")
)
return
invite = await self.get_invite_for_reinvite(ctx)
if invite:
try:
await user.send(
_(
"You've been unbanned from {server}.\n"
"Here is an invite for that server: {invite_link}"
).format(server=guild.name, invite_link=invite.url)
)
except discord.Forbidden:
await ctx.send(
_(
"I failed to send an invite to that user. "
"Perhaps you may be able to send it for me?\n"
"Here's the invite link: {invite_link}"
).format(invite_link=invite.url)
)
except discord.HTTPException:
await ctx.send(
_(
"Something went wrong when attempting to send that user"
"an invite. Here's the link so you can try: {invite_link}"
).format(invite_link=invite.url)
)
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4211_1 |
crossvul-python_data_good_4366_1 | """
Custom Authenticator to use Bitbucket OAuth with JupyterHub
"""
import json
import urllib
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set, default, observe
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}".format(access_token),
}
class BitbucketOAuthenticator(OAuthenticator):
_deprecated_oauth_aliases = {
"team_whitelist": ("allowed_teams", "0.12.0"),
**OAuthenticator._deprecated_oauth_aliases,
}
login_service = "Bitbucket"
client_id_env = 'BITBUCKET_CLIENT_ID'
client_secret_env = 'BITBUCKET_CLIENT_SECRET'
@default("authorize_url")
def _authorize_url_default(self):
return "https://bitbucket.org/site/oauth2/authorize"
@default("token_url")
def _token_url_default(self):
return "https://bitbucket.org/site/oauth2/access_token"
team_whitelist = Set(help="Deprecated, use `BitbucketOAuthenticator.allowed_teams`", config=True,)
allowed_teams = Set(
config=True, help="Automatically allow members of selected teams"
)
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}",
}
async def authenticate(self, handler, data=None):
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
grant_type="authorization_code",
code=code,
redirect_uri=self.get_callback_url(handler),
)
url = url_concat("https://bitbucket.org/site/oauth2/access_token", params)
bb_header = {"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"}
req = HTTPRequest(
url,
method="POST",
auth_username=self.client_id,
auth_password=self.client_secret,
body=urllib.parse.urlencode(params).encode('utf-8'),
headers=bb_header,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# Determine who the logged in user is
req = HTTPRequest(
"https://api.bitbucket.org/2.0/user",
method="GET",
headers=_api_headers(access_token),
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
# Check if user is a member of any allowed teams.
# This check is performed here, as the check requires `access_token`.
if self.allowed_teams:
user_in_team = await self._check_membership_allowed_teams(username, access_token)
if not user_in_team:
self.log.warning("%s not in team allowed list of users", username)
return None
return {
'name': username,
'auth_state': {'access_token': access_token, 'bitbucket_user': resp_json},
}
async def _check_membership_allowed_teams(self, username, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# We verify the team membership by calling teams endpoint.
next_page = url_concat(
"https://api.bitbucket.org/2.0/teams", {'role': 'member'}
)
while next_page:
req = HTTPRequest(next_page, method="GET", headers=headers)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
next_page = resp_json.get('next', None)
user_teams = set([entry["username"] for entry in resp_json["values"]])
# check if any of the organizations seen thus far are in the allowed list
if len(self.allowed_teams & user_teams) > 0:
return True
return False
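# Illustrative sketch (not part of this authenticator): the cursor-style
# pagination loop above, with the HTTP fetch stubbed out. Bitbucket returns
# a 'next' URL until the last page, so membership can short-circuit per page.
async def _paged_membership_sketch(fetch_page, first_url, allowed_teams):
    """fetch_page is assumed to be an async callable returning a dict with
    'values' (a list of {'username': ...}) and an optional 'next' URL;
    allowed_teams is assumed to be a set of team usernames."""
    next_page = first_url
    while next_page:
        resp_json = await fetch_page(next_page)
        next_page = resp_json.get('next', None)
        user_teams = {entry['username'] for entry in resp_json['values']}
        if allowed_teams & user_teams:
            return True
    return False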
class LocalBitbucketOAuthenticator(LocalAuthenticator, BitbucketOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4366_1 |
crossvul-python_data_bad_4366_2 | """CILogon OAuthAuthenticator for JupyterHub
Uses OAuth 2.0 with cilogon.org (override with CILOGON_HOST)
Caveats:
- For allowed-user-list / admin purposes, username will be the ePPN by default.
This is typically an email address and may not work as a Unix userid.
Normalization may be required to turn the JupyterHub username into a Unix username.
- Default username_claim of ePPN does not work for all providers,
e.g. generic OAuth such as Google.
Use `c.CILogonOAuthenticator.username_claim = 'email'` to use
email instead of ePPN as the JupyterHub username.
"""
import json
import os
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from traitlets import Unicode, List, Bool, default, validate, observe
from jupyterhub.auth import LocalAuthenticator
from .oauth2 import OAuthLoginHandler, OAuthenticator
class CILogonLoginHandler(OAuthLoginHandler):
"""See http://www.cilogon.org/oidc for general information."""
def authorize_redirect(self, *args, **kwargs):
"""Add idp, skin to redirect params"""
extra_params = kwargs.setdefault('extra_params', {})
if self.authenticator.idp:
extra_params["selected_idp"] = self.authenticator.idp
if self.authenticator.skin:
extra_params["skin"] = self.authenticator.skin
return super().authorize_redirect(*args, **kwargs)
class CILogonOAuthenticator(OAuthenticator):
_deprecated_aliases = {
"idp_whitelist": ("allowed_idps", "0.12.0"),
}
@observe(*list(_deprecated_aliases))
def _deprecated_trait(self, change):
super()._deprecated_trait(change)
login_service = "CILogon"
client_id_env = 'CILOGON_CLIENT_ID'
client_secret_env = 'CILOGON_CLIENT_SECRET'
login_handler = CILogonLoginHandler
cilogon_host = Unicode(os.environ.get("CILOGON_HOST") or "cilogon.org", config=True)
@default("authorize_url")
def _authorize_url_default(self):
return "https://%s/authorize" % self.cilogon_host
@default("token_url")
def _token_url(self):
return "https://%s/oauth2/token" % self.cilogon_host
scope = List(
Unicode(),
default_value=['openid', 'email', 'org.cilogon.userinfo'],
config=True,
help="""The OAuth scopes to request.
See cilogon_scope.md for details.
At least 'openid' is required.
""",
)
@validate('scope')
def _validate_scope(self, proposal):
"""ensure openid is requested"""
if 'openid' not in proposal.value:
return ['openid'] + proposal.value
return proposal.value
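# Illustrative sketch (not part of this authenticator): what the validator
# above guarantees, shown as a plain function.
def _ensure_openid_sketch(scopes):
    return scopes if 'openid' in scopes else ['openid'] + list(scopes)
# _ensure_openid_sketch(['email'])           -> ['openid', 'email']
# _ensure_openid_sketch(['openid', 'email']) -> ['openid', 'email']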
idp_whitelist = List(help="Deprecated, use `CILogonOAuthenticator.allowed_idps`", config=True,)
allowed_idps = List(
config=True,
help="""A list of IDP which can be stripped from the username after the @ sign.""",
)
strip_idp_domain = Bool(
False,
config=True,
help="""Remove the IDP domain from the username. Note that only domains which
appear in the `allowed_idps` will be stripped.""",
)
idp = Unicode(
config=True,
help="""The `idp` attribute is the SAML Entity ID of the user's selected
identity provider.
See https://cilogon.org/include/idplist.xml for the list of identity
providers supported by CILogon.
""",
)
skin = Unicode(
config=True,
help="""The `skin` attribute is the name of the custom CILogon interface skin
for your application.
Contact help@cilogon.org to request a custom skin.
""",
)
username_claim = Unicode(
"eppn",
config=True,
help="""The claim in the userinfo response from which to get the JupyterHub username
Examples include: eppn, email
What keys are available will depend on the scopes requested.
See http://www.cilogon.org/oidc for details.
""",
)
additional_username_claims = List(
config=True,
help="""Additional claims to check if the username_claim fails.
This is useful for linked identities where not all of them return
the primary username_claim.
""",
)
async def authenticate(self, handler, data=None):
"""We set up auth_state based on additional CILogon info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a CILogon Access Token
# See: http://www.cilogon.org/oidc
headers = {"Accept": "application/json", "User-Agent": "JupyterHub"}
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.oauth_callback_url,
code=code,
grant_type='authorization_code',
)
url = url_concat(self.token_url, params)
req = HTTPRequest(url, headers=headers, method="POST", body='')
resp = await http_client.fetch(req)
token_response = json.loads(resp.body.decode('utf8', 'replace'))
access_token = token_response['access_token']
self.log.info("Access token acquired.")
# Determine who the logged in user is
params = dict(access_token=access_token)
req = HTTPRequest(
url_concat("https://%s/oauth2/userinfo" % self.cilogon_host, params),
headers=headers,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
claimlist = [self.username_claim]
if self.additional_username_claims:
claimlist.extend(self.additional_username_claims)
for claim in claimlist:
username = resp_json.get(claim)
if username:
break
if not username:
if len(claimlist) < 2:
self.log.error(
"Username claim %s not found in response: %s",
self.username_claim,
sorted(resp_json.keys()),
)
else:
self.log.error(
"No username claim from %r in response: %s",
claimlist,
sorted(resp_json.keys()),
)
raise web.HTTPError(500, "Failed to get username from CILogon")
if self.allowed_idps:
gotten_name, gotten_idp = username.split('@')
if gotten_idp not in self.allowed_idps:
self.log.error(
"Trying to log in from a domain that is not allowed: %s", gotten_idp
)
raise web.HTTPError(500, "Trying to log in from a domain that is not allowed")
if len(self.allowed_idps) == 1 and self.strip_idp_domain:
username = gotten_name
userdict = {"name": username}
# Now we set up auth_state
userdict["auth_state"] = auth_state = {}
# Save the token response and full CILogon reply in auth state
# These can be used for user provisioning
# in the Lab/Notebook environment.
auth_state['token_response'] = token_response
# store the whole user model in auth_state.cilogon_user
# keep access_token as well, in case anyone was relying on it
auth_state['access_token'] = access_token
auth_state['cilogon_user'] = resp_json
return userdict
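# Illustrative sketch (not part of this authenticator): the username-claim
# fallback performed in authenticate() — try the primary claim, then each
# additional claim in order, taking the first non-empty value.
def _resolve_username_sketch(userinfo, primary_claim, additional_claims=()):
    for claim in [primary_claim, *additional_claims]:
        username = userinfo.get(claim)
        if username:
            return username
    return None
# _resolve_username_sketch({'email': 'a@b.org'}, 'eppn', ['email'])
# -> 'a@b.org'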
class LocalCILogonOAuthenticator(LocalAuthenticator, CILogonOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4366_2 |
crossvul-python_data_good_4098_0 | from collections import defaultdict
from datetime import datetime
from debts import settle
from flask import current_app, g
from flask_sqlalchemy import BaseQuery, SQLAlchemy
from itsdangerous import (
BadSignature,
SignatureExpired,
TimedJSONWebSignatureSerializer,
URLSafeSerializer,
)
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.sql import func
from sqlalchemy_continuum import make_versioned, version_class
from sqlalchemy_continuum.plugins import FlaskPlugin
from werkzeug.security import generate_password_hash
from ihatemoney.patch_sqlalchemy_continuum import PatchedBuilder
from ihatemoney.versioning import (
ConditionalVersioningManager,
LoggingMode,
get_ip_if_allowed,
version_privacy_predicate,
)
make_versioned(
user_cls=None,
manager=ConditionalVersioningManager(
# Conditionally Disable the versioning based on each
# project's privacy preferences
tracking_predicate=version_privacy_predicate,
# Patch in a fix to a SQLAlchemy-Continuum bug.
# See patch_sqlalchemy_continuum.py
builder=PatchedBuilder(),
),
plugins=[
FlaskPlugin(
# Redirect to our own function, which respects user preferences
# on IP address collection
remote_addr_factory=get_ip_if_allowed,
# Suppress the plugin's attempt to grab a user id,
# which imports the flask_login module (causing an error)
current_user_id_factory=lambda: None,
)
],
)
db = SQLAlchemy()
class Project(db.Model):
class ProjectQuery(BaseQuery):
def get_by_name(self, name):
return Project.query.filter(Project.name == name).one()
# Direct SQLAlchemy-Continuum to track changes to this model
__versioned__ = {}
id = db.Column(db.String(64), primary_key=True)
name = db.Column(db.UnicodeText)
password = db.Column(db.String(128))
contact_email = db.Column(db.String(128))
logging_preference = db.Column(
db.Enum(LoggingMode),
default=LoggingMode.default(),
nullable=False,
server_default=LoggingMode.default().name,
)
members = db.relationship("Person", backref="project")
query_class = ProjectQuery
default_currency = db.Column(db.String(3))
@property
def _to_serialize(self):
obj = {
"id": self.id,
"name": self.name,
"contact_email": self.contact_email,
"logging_preference": self.logging_preference.value,
"members": [],
"default_currency": self.default_currency,
}
balance = self.balance
for member in self.members:
member_obj = member._to_serialize
member_obj["balance"] = balance.get(member.id, 0)
obj["members"].append(member_obj)
return obj
@property
def active_members(self):
return [m for m in self.members if m.activated]
@property
def balance(self):
balances, should_pay, should_receive = (defaultdict(int) for _ in range(3))
# for each person
for person in self.members:
# get the list of bills he has to pay
bills = Bill.query.options(orm.subqueryload(Bill.owers)).filter(
Bill.owers.contains(person)
)
for bill in bills.all():
if person != bill.payer:
share = bill.pay_each() * person.weight
should_pay[person] += share
should_receive[bill.payer] += share
for person in self.members:
balance = should_receive[person] - should_pay[person]
balances[person.id] = balance
return balances
@property
def members_stats(self):
"""Compute what each member has paid
:return: one stat dict per member
:rtype list:
"""
return [
{
"member": member,
"paid": sum(
[
bill.converted_amount
for bill in self.get_member_bills(member.id).all()
]
),
"spent": sum(
[
bill.pay_each() * member.weight
for bill in self.get_bills().all()
if member in bill.owers
]
),
"balance": self.balance[member.id],
}
for member in self.active_members
]
@property
def monthly_stats(self):
"""Compute expenses by month
:return: a dict of years mapping to a dict of months mapping to the amount
:rtype dict:
"""
monthly = defaultdict(lambda: defaultdict(float))
for bill in self.get_bills().all():
monthly[bill.date.year][bill.date.month] += bill.converted_amount
return monthly
@property
def uses_weights(self):
return len([i for i in self.members if i.weight != 1]) > 0
def get_transactions_to_settle_bill(self, pretty_output=False):
"""Return a list of transactions that could be made to settle the bill"""
def prettify(transactions, pretty_output):
""" Return pretty transactions
"""
if not pretty_output:
return transactions
pretty_transactions = []
for transaction in transactions:
pretty_transactions.append(
{
"ower": transaction["ower"].name,
"receiver": transaction["receiver"].name,
"amount": round(transaction["amount"], 2),
}
)
return pretty_transactions
# cache value for better performance
members = {person.id: person for person in self.members}
settle_plan = settle(self.balance.items()) or []
transactions = [
{
"ower": members[ower_id],
"receiver": members[receiver_id],
"amount": amount,
}
for ower_id, amount, receiver_id in settle_plan
]
return prettify(transactions, pretty_output)
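# Illustrative sketch (not part of this model): the data shapes around
# debts.settle() as used above. The input is an iterable of
# (member_id, balance) pairs, and the comprehension above unpacks each
# result row as (ower_id, amount, receiver_id). With hypothetical ids:
#     balances = {1: -10.0, 2: 10.0}   # member 1 owes 10.0
#     plan = settle(balances.items())  # e.g. [(1, 10.0, 2)]
#     # one transaction: member 1 pays 10.0 to member 2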
def exactmatch(self, credit, debts):
"""Recursively try and find subsets of 'debts' whose sum is equal to credit"""
if not debts:
return None
if debts[0]["balance"] > credit:
return self.exactmatch(credit, debts[1:])
elif debts[0]["balance"] == credit:
return [debts[0]]
else:
match = self.exactmatch(credit - debts[0]["balance"], debts[1:])
if match:
match.append(debts[0])
else:
match = self.exactmatch(credit, debts[1:])
return match
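# Illustrative sketch (not part of this model): exactmatch() above is a
# recursive subset-sum search over debt balances. A self-contained version
# over plain numbers shows the three branches (skip, exact hit, include):
def _exact_match_sketch(credit, debts):
    if not debts:
        return None
    head, tail = debts[0], debts[1:]
    if head > credit:
        return _exact_match_sketch(credit, tail)  # too big: skip it
    if head == credit:
        return [head]  # exact hit
    match = _exact_match_sketch(credit - head, tail)  # try including head
    if match:
        match.append(head)
    else:
        match = _exact_match_sketch(credit, tail)  # fall back to skipping
    return match
# _exact_match_sketch(30, [40, 10, 20]) -> [20, 10]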
def has_bills(self):
"""return if the project do have bills or not"""
return self.get_bills().count() > 0
def get_bills(self):
"""Return the list of bills related to this project"""
return (
Bill.query.join(Person, Project)
.filter(Bill.payer_id == Person.id)
.filter(Person.project_id == Project.id)
.filter(Project.id == self.id)
.order_by(Bill.date.desc())
.order_by(Bill.creation_date.desc())
.order_by(Bill.id.desc())
)
def get_member_bills(self, member_id):
"""Return the list of bills related to a specific member"""
return (
Bill.query.join(Person, Project)
.filter(Bill.payer_id == Person.id)
.filter(Person.project_id == Project.id)
.filter(Person.id == member_id)
.filter(Project.id == self.id)
.order_by(Bill.date.desc())
.order_by(Bill.id.desc())
)
def get_pretty_bills(self, export_format="json"):
"""Return a list of project's bills with pretty formatting"""
bills = self.get_bills()
pretty_bills = []
for bill in bills:
if export_format == "json":
owers = [ower.name for ower in bill.owers]
else:
owers = ", ".join([ower.name for ower in bill.owers])
pretty_bills.append(
{
"what": bill.what,
"amount": round(bill.amount, 2),
"date": str(bill.date),
"payer_name": Person.query.get(bill.payer_id).name,
"payer_weight": Person.query.get(bill.payer_id).weight,
"owers": owers,
}
)
return pretty_bills
def remove_member(self, member_id):
"""Remove a member from the project.
If the member is not bound to any bill, they are deleted; otherwise
they are only deactivated.
Returns the Person that was deleted or deactivated, or None if no
member with that id exists.
"""
try:
person = Person.query.get(member_id, self)
except orm.exc.NoResultFound:
return None
if not person.has_bills():
db.session.delete(person)
db.session.commit()
else:
person.activated = False
db.session.commit()
return person
def remove_project(self):
db.session.delete(self)
db.session.commit()
def generate_token(self, expiration=0):
"""Generate a timed and serialized JsonWebToken
:param expiration: Token expiration time (in seconds)
"""
if expiration:
serializer = TimedJSONWebSignatureSerializer(
current_app.config["SECRET_KEY"], expiration
)
token = serializer.dumps({"project_id": self.id}).decode("utf-8")
else:
serializer = URLSafeSerializer(current_app.config["SECRET_KEY"])
token = serializer.dumps({"project_id": self.id})
return token
@staticmethod
def verify_token(token, token_type="timed_token"):
"""Return the project id associated to the provided token,
None if the provided token is expired or not valid.
:param token: Serialized TimedJsonWebToken
"""
if token_type == "timed_token":
serializer = TimedJSONWebSignatureSerializer(
current_app.config["SECRET_KEY"]
)
else:
serializer = URLSafeSerializer(current_app.config["SECRET_KEY"])
try:
data = serializer.loads(token)
except SignatureExpired:
return None
except BadSignature:
return None
return data["project_id"]
def __str__(self):
return self.name
def __repr__(self):
return f"<Project {self.name}>"
@staticmethod
def create_demo_project():
project = Project(
id="demo",
name="demonstration",
password=generate_password_hash("demo"),
contact_email="demo@notmyidea.org",
default_currency="EUR",
)
db.session.add(project)
db.session.commit()
members = {}
for name in ("Amina", "Georg", "Alice"):
person = Person()
person.name = name
person.project = project
person.weight = 1
db.session.add(person)
members[name] = person
db.session.commit()
operations = (
("Georg", 200, ("Amina", "Georg", "Alice"), "Food shopping"),
("Alice", 20, ("Amina", "Alice"), "Beer !"),
("Amina", 50, ("Amina", "Alice", "Georg"), "AMAP"),
)
for (payer, amount, owers, subject) in operations:
bill = Bill()
bill.payer_id = members[payer].id
bill.what = subject
bill.owers = [members[name] for name in owers]
bill.amount = amount
bill.original_currency = "EUR"
bill.converted_amount = amount
db.session.add(bill)
db.session.commit()
return project
class Person(db.Model):
class PersonQuery(BaseQuery):
def get_by_name(self, name, project):
return (
Person.query.filter(Person.name == name)
.filter(Person.project_id == project.id)
.one()
)
def get(self, id, project=None):
if not project:
project = g.project
return (
Person.query.filter(Person.id == id)
.filter(Person.project_id == project.id)
.one()
)
query_class = PersonQuery
# Direct SQLAlchemy-Continuum to track changes to this model
__versioned__ = {}
__table_args__ = {"sqlite_autoincrement": True}
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.String(64), db.ForeignKey("project.id"))
bills = db.relationship("Bill", backref="payer")
name = db.Column(db.UnicodeText)
weight = db.Column(db.Float, default=1)
activated = db.Column(db.Boolean, default=True)
@property
def _to_serialize(self):
return {
"id": self.id,
"name": self.name,
"weight": self.weight,
"activated": self.activated,
}
def has_bills(self):
"""return if the user do have bills or not"""
bills_as_ower_number = (
db.session.query(billowers)
.filter(billowers.columns.get("person_id") == self.id)
.count()
)
return bills_as_ower_number != 0 or len(self.bills) != 0
def __str__(self):
return self.name
def __repr__(self):
return f"<Person {self.name} for project {self.project.name}>"
# We need to manually define a join table for m2m relations
billowers = db.Table(
"billowers",
db.Column("bill_id", db.Integer, db.ForeignKey("bill.id"), primary_key=True),
db.Column("person_id", db.Integer, db.ForeignKey("person.id"), primary_key=True),
sqlite_autoincrement=True,
)
class Bill(db.Model):
class BillQuery(BaseQuery):
def get(self, project, id):
try:
return (
self.join(Person, Project)
.filter(Bill.payer_id == Person.id)
.filter(Person.project_id == Project.id)
.filter(Project.id == project.id)
.filter(Bill.id == id)
.one()
)
except orm.exc.NoResultFound:
return None
def delete(self, project, id):
bill = self.get(project, id)
if bill:
db.session.delete(bill)
return bill
query_class = BillQuery
# Direct SQLAlchemy-Continuum to track changes to this model
__versioned__ = {}
__table_args__ = {"sqlite_autoincrement": True}
id = db.Column(db.Integer, primary_key=True)
payer_id = db.Column(db.Integer, db.ForeignKey("person.id"))
owers = db.relationship(Person, secondary=billowers)
amount = db.Column(db.Float)
date = db.Column(db.Date, default=datetime.now)
creation_date = db.Column(db.Date, default=datetime.now)
what = db.Column(db.UnicodeText)
external_link = db.Column(db.UnicodeText)
original_currency = db.Column(db.String(3))
converted_amount = db.Column(db.Float)
archive = db.Column(db.Integer, db.ForeignKey("archive.id"))
@property
def _to_serialize(self):
return {
"id": self.id,
"payer_id": self.payer_id,
"owers": self.owers,
"amount": self.amount,
"date": self.date,
"creation_date": self.creation_date,
"what": self.what,
"external_link": self.external_link,
"original_currency": self.original_currency,
"converted_amount": self.converted_amount,
}
def pay_each_default(self, amount):
"""Compute what each share has to pay"""
if self.owers:
weights = (
db.session.query(func.sum(Person.weight))
.join(billowers, Bill)
.filter(Bill.id == self.id)
).scalar()
return amount / weights
else:
return 0
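# Illustrative sketch (not part of this model): the weighted split behind
# pay_each_default(). Each ower pays amount * (own weight / total weight);
# pay_each_default() returns the per-unit-weight share, and callers multiply
# it by each person's weight.
def _pay_each_sketch(amount, ower_weights):
    total_weight = sum(ower_weights.values())
    if not total_weight:
        return {}
    share = amount / total_weight  # share per unit of weight
    return {name: share * weight for name, weight in ower_weights.items()}
# _pay_each_sketch(30.0, {"Amina": 1, "Georg": 2})
# -> {'Amina': 10.0, 'Georg': 20.0}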
def __str__(self):
return self.what
def pay_each(self):
return self.pay_each_default(self.converted_amount)
def __repr__(self):
return (
f"<Bill of {self.amount} from {self.payer} for "
f"{', '.join([o.name for o in self.owers])}>"
)
class Archive(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.String(64), db.ForeignKey("project.id"))
name = db.Column(db.UnicodeText)
@property
def start_date(self):
pass
@property
def end_date(self):
pass
def __repr__(self):
return "<Archive>"
sqlalchemy.orm.configure_mappers()
PersonVersion = version_class(Person)
ProjectVersion = version_class(Project)
BillVersion = version_class(Bill)
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4098_0 |
crossvul-python_data_bad_2493_1 | from __future__ import absolute_import
from typing import Any, Optional, Tuple, List, Set, Iterable, Mapping, Callable, Dict
from django.utils.translation import ugettext as _
from django.conf import settings
from django.db import transaction
from django.http import HttpRequest, HttpResponse
from zerver.lib.request import JsonableError, REQ, has_request_variables
from zerver.decorator import authenticated_json_post_view, \
authenticated_json_view, \
get_user_profile_by_email, require_realm_admin, to_non_negative_int
from zerver.lib.actions import bulk_remove_subscriptions, \
do_change_subscription_property, internal_prep_message, \
create_streams_if_needed, gather_subscriptions, subscribed_to_stream, \
bulk_add_subscriptions, do_send_messages, get_subscriber_emails, do_rename_stream, \
do_deactivate_stream, do_make_stream_public, do_add_default_stream, \
do_change_stream_description, do_get_streams, do_make_stream_private, \
do_remove_default_stream, get_topic_history_for_stream
from zerver.lib.response import json_success, json_error, json_response
from zerver.lib.validator import check_string, check_list, check_dict, \
check_bool, check_variable_type
from zerver.models import UserProfile, Stream, Realm, Subscription, \
Recipient, get_recipient, get_stream, bulk_get_streams, \
bulk_get_recipients, valid_stream_name, get_active_user_dicts_in_realm
from collections import defaultdict
import ujson
from six.moves import urllib
import six
from typing import Text
def is_active_subscriber(user_profile, recipient):
# type: (UserProfile, Recipient) -> bool
return Subscription.objects.filter(user_profile=user_profile,
recipient=recipient,
active=True).exists()
def list_to_streams(streams_raw, user_profile, autocreate=False):
# type: (Iterable[Mapping[str, Any]], UserProfile, Optional[bool]) -> Tuple[List[Stream], List[Stream]]
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name: that is, that it is shorter
than Stream.MAX_NAME_LENGTH characters and passes
valid_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
    @param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = set(stream_dict["name"] for stream_dict in streams_raw)
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name (%s) too long.") % (stream_name,))
if not valid_stream_name(stream_name):
raise JsonableError(_("Invalid stream name (%s).") % (stream_name,))
existing_streams = [] # type: List[Stream]
missing_stream_dicts = [] # type: List[Mapping[str, Any]]
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
missing_stream_dicts.append(stream_dict)
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams = [] # type: List[Stream]
else:
# autocreate=True path starts here
if not user_profile.can_create_streams():
raise JsonableError(_('User cannot create streams.'))
elif not autocreate:
raise JsonableError(_("Stream(s) (%s) do not exist") % ", ".join(
stream_dict["name"] for stream_dict in missing_stream_dicts))
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(realm=user_profile.realm,
stream_dicts=missing_stream_dicts)
existing_streams += dup_streams
return existing_streams, created_streams
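# Illustrative call (hypothetical stream names): given streams_raw of
# [{'name': 'devel'}, {'name': 'new-idea'}] where only 'devel' already
# exists and autocreate=True, this returns ([<devel>], [<new-idea>]):
# existing streams first, newly created streams second.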
class PrincipalError(JsonableError):
def __init__(self, principal, status_code=403):
# type: (Text, int) -> None
self.principal = principal # type: Text
self.status_code = status_code # type: int
def to_json_error_msg(self):
# type: () -> Text
return ("User not authorized to execute queries on behalf of '%s'"
% (self.principal,))
def principal_to_user_profile(agent, principal):
# type: (UserProfile, Text) -> UserProfile
principal_doesnt_exist = False
try:
principal_user_profile = get_user_profile_by_email(principal)
except UserProfile.DoesNotExist:
principal_doesnt_exist = True
if (principal_doesnt_exist or
agent.realm != principal_user_profile.realm):
# We have to make sure we don't leak information about which users
# are registered for Zulip in a different realm. We could do
# something a little more clever and check the domain part of the
# principal to maybe give a better error message
raise PrincipalError(principal)
return principal_user_profile
@require_realm_admin
def deactivate_stream_backend(request, user_profile, stream_id):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
target = get_and_validate_stream_by_id(stream_id, user_profile.realm)
if target.invite_only and not subscribed_to_stream(user_profile, target):
return json_error(_('Cannot administer invite-only streams this way'))
do_deactivate_stream(target)
return json_success()
@require_realm_admin
@has_request_variables
def add_default_stream(request, user_profile, stream_name=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
do_add_default_stream(user_profile.realm, stream_name)
return json_success()
@require_realm_admin
@has_request_variables
def remove_default_stream(request, user_profile, stream_name=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
do_remove_default_stream(user_profile.realm, stream_name)
return json_success()
@require_realm_admin
@has_request_variables
def update_stream_backend(request, user_profile, stream_id,
description=REQ(validator=check_string, default=None),
is_private=REQ(validator=check_bool, default=None),
new_name=REQ(validator=check_string, default=None)):
# type: (HttpRequest, UserProfile, int, Optional[Text], Optional[bool], Optional[Text]) -> HttpResponse
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
stream_name = stream.name
if description is not None:
do_change_stream_description(user_profile.realm, stream_name, description)
if stream_name is not None and new_name is not None:
do_rename_stream(user_profile.realm, stream_name, new_name)
if is_private is not None:
if is_private:
do_make_stream_private(user_profile.realm, stream_name)
else:
do_make_stream_public(user_profile, user_profile.realm, stream_name)
return json_success()
def list_subscriptions_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({"subscriptions": gather_subscriptions(user_profile)[0]})
FuncKwargPair = Tuple[Callable[..., HttpResponse], Dict[str, Iterable[Any]]]
@has_request_variables
def update_subscriptions_backend(request, user_profile,
delete=REQ(validator=check_list(check_string), default=[]),
add=REQ(validator=check_list(check_dict([('name', check_string)])), default=[])):
# type: (HttpRequest, UserProfile, Iterable[Text], Iterable[Mapping[str, Any]]) -> HttpResponse
if not add and not delete:
return json_error(_('Nothing to do. Specify at least one of "add" or "delete".'))
method_kwarg_pairs = [
(add_subscriptions_backend, dict(streams_raw=add)),
(remove_subscriptions_backend, dict(streams_raw=delete))
] # type: List[FuncKwargPair]
return compose_views(request, user_profile, method_kwarg_pairs)
def compose_views(request, user_profile, method_kwarg_pairs):
# type: (HttpRequest, UserProfile, List[FuncKwargPair]) -> HttpResponse
'''
This takes a series of view methods from method_kwarg_pairs and calls
them in sequence, and it smushes all the json results into a single
response when everything goes right. (This helps clients avoid extra
latency hops.) It rolls back the transaction when things go wrong in
any one of the composed methods.
    TODO: Move this to a utils-like module if we end up using it more widely.
'''
json_dict = {} # type: Dict[str, Any]
with transaction.atomic():
for method, kwargs in method_kwarg_pairs:
response = method(request, user_profile, **kwargs)
if response.status_code != 200:
raise JsonableError(response.content)
json_dict.update(ujson.loads(response.content))
return json_success(json_dict)
@authenticated_json_post_view
def json_remove_subscriptions(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return remove_subscriptions_backend(request, user_profile)
@has_request_variables
def remove_subscriptions_backend(request, user_profile,
streams_raw = REQ("subscriptions", validator=check_list(check_string)),
principals = REQ(validator=check_list(check_string), default=None)):
# type: (HttpRequest, UserProfile, Iterable[Text], Optional[Iterable[Text]]) -> HttpResponse
removing_someone_else = principals and \
set(principals) != set((user_profile.email,))
if removing_someone_else and not user_profile.is_realm_admin:
# You can only unsubscribe other people from a stream if you are a realm
# admin.
return json_error(_("This action requires administrative rights"))
streams_as_dict = []
for stream_name in streams_raw:
streams_as_dict.append({"name": stream_name.strip()})
streams, __ = list_to_streams(streams_as_dict, user_profile)
for stream in streams:
if removing_someone_else and stream.invite_only and \
not subscribed_to_stream(user_profile, stream):
# Even as an admin, you can't remove other people from an
# invite-only stream you're not on.
return json_error(_("Cannot administer invite-only streams this way"))
if principals:
people_to_unsub = set(principal_to_user_profile(
user_profile, principal) for principal in principals)
else:
people_to_unsub = set([user_profile])
result = dict(removed=[], not_subscribed=[]) # type: Dict[str, List[Text]]
(removed, not_subscribed) = bulk_remove_subscriptions(people_to_unsub, streams)
for (subscriber, stream) in removed:
result["removed"].append(stream.name)
for (subscriber, stream) in not_subscribed:
result["not_subscribed"].append(stream.name)
return json_success(result)
def filter_stream_authorization(user_profile, streams):
# type: (UserProfile, Iterable[Stream]) -> Tuple[List[Stream], List[Stream]]
streams_subscribed = set() # type: Set[int]
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])
subs = Subscription.objects.filter(user_profile=user_profile,
recipient__in=list(recipients_map.values()),
active=True)
for sub in subs:
streams_subscribed.add(sub.recipient.type_id)
unauthorized_streams = [] # type: List[Stream]
for stream in streams:
        # The user is authorized for their own streams
if stream.id in streams_subscribed:
continue
# The user is not authorized for invite_only streams
if stream.invite_only:
unauthorized_streams.append(stream)
authorized_streams = [stream for stream in streams if
stream.id not in set(stream.id for stream in unauthorized_streams)]
return authorized_streams, unauthorized_streams
@has_request_variables
def add_subscriptions_backend(request, user_profile,
streams_raw = REQ("subscriptions",
validator=check_list(check_dict([('name', check_string)]))),
invite_only = REQ(validator=check_bool, default=False),
announce = REQ(validator=check_bool, default=False),
principals = REQ(validator=check_list(check_string), default=None),
authorization_errors_fatal = REQ(validator=check_bool, default=True)):
# type: (HttpRequest, UserProfile, Iterable[Mapping[str, Text]], bool, bool, Optional[List[Text]], bool) -> HttpResponse
stream_dicts = []
for stream_dict in streams_raw:
stream_dict_copy = {} # type: Dict[str, Any]
for field in stream_dict:
stream_dict_copy[field] = stream_dict[field]
# Strip the stream name here.
stream_dict_copy['name'] = stream_dict_copy['name'].strip()
stream_dict_copy["invite_only"] = invite_only
stream_dicts.append(stream_dict_copy)
# Validation of the streams arguments, including enforcement of
# can_create_streams policy and valid_stream_name policy is inside
# list_to_streams.
existing_streams, created_streams = \
list_to_streams(stream_dicts, user_profile, autocreate=True)
authorized_streams, unauthorized_streams = \
filter_stream_authorization(user_profile, existing_streams)
if len(unauthorized_streams) > 0 and authorization_errors_fatal:
return json_error(_("Unable to access stream (%s).") % unauthorized_streams[0].name)
# Newly created streams are also authorized for the creator
streams = authorized_streams + created_streams
if principals is not None:
if user_profile.realm.is_zephyr_mirror_realm and not all(stream.invite_only for stream in streams):
return json_error(_("You can only invite other Zephyr mirroring users to invite-only streams."))
subscribers = set(principal_to_user_profile(user_profile, principal) for principal in principals)
else:
subscribers = set([user_profile])
(subscribed, already_subscribed) = bulk_add_subscriptions(streams, subscribers)
result = dict(subscribed=defaultdict(list), already_subscribed=defaultdict(list)) # type: Dict[str, Any]
for (subscriber, stream) in subscribed:
result["subscribed"][subscriber.email].append(stream.name)
for (subscriber, stream) in already_subscribed:
result["already_subscribed"][subscriber.email].append(stream.name)
private_streams = dict((stream.name, stream.invite_only) for stream in streams)
bots = dict((subscriber.email, subscriber.is_bot) for subscriber in subscribers)
# Inform the user if someone else subscribed them to stuff,
# or if a new stream was created with the "announce" option.
notifications = []
if principals and result["subscribed"]:
for email, subscriptions in six.iteritems(result["subscribed"]):
if email == user_profile.email:
# Don't send a Zulip if you invited yourself.
continue
if bots[email]:
# Don't send invitation Zulips to bots
continue
if len(subscriptions) == 1:
msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the%s stream #**%s**."
% (user_profile.full_name,
" **invite-only**" if private_streams[subscriptions[0]] else "",
subscriptions[0],
))
else:
msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the following streams: \n\n"
% (user_profile.full_name,))
for stream in subscriptions:
msg += "* #**%s**%s\n" % (
stream,
" (**invite-only**)" if private_streams[stream] else "")
if len([s for s in subscriptions if not private_streams[s]]) > 0:
msg += "\nYou can see historical content on a non-invite-only stream by narrowing to it."
notifications.append(internal_prep_message(
user_profile.realm, settings.NOTIFICATION_BOT,
"private", email, "", msg))
if announce and len(created_streams) > 0:
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream is not None:
if len(created_streams) > 1:
stream_msg = "the following streams: %s" % (", ".join('#**%s**' % s.name for s in created_streams))
else:
stream_msg = "a new stream #**%s**." % created_streams[0].name
msg = ("%s just created %s" % (user_profile.full_name, stream_msg))
notifications.append(
internal_prep_message(user_profile.realm, settings.NOTIFICATION_BOT,
"stream",
notifications_stream.name, "Streams", msg))
else:
msg = ("Hi there! %s just created a new stream #**%s**."
% (user_profile.full_name, created_streams[0].name))
for realm_user_dict in get_active_user_dicts_in_realm(user_profile.realm):
# Don't announce to yourself or to people you explicitly added
# (who will get the notification above instead).
                if realm_user_dict['email'] in (principals or []) or realm_user_dict['email'] == user_profile.email:
continue
notifications.append(internal_prep_message(
user_profile.realm, settings.NOTIFICATION_BOT,
"private",
realm_user_dict['email'], "", msg))
if len(notifications) > 0:
do_send_messages(notifications)
result["subscribed"] = dict(result["subscribed"])
result["already_subscribed"] = dict(result["already_subscribed"])
if not authorization_errors_fatal:
result["unauthorized"] = [stream.name for stream in unauthorized_streams]
return json_success(result)
@has_request_variables
def get_subscribers_backend(request, user_profile,
stream_id=REQ('stream', converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
subscribers = get_subscriber_emails(stream, user_profile)
return json_success({'subscribers': subscribers})
# By default, lists all streams that the user has access to --
# i.e. public streams plus invite-only streams that the user is on
@has_request_variables
def get_streams_backend(request, user_profile,
include_public=REQ(validator=check_bool, default=True),
include_subscribed=REQ(validator=check_bool, default=True),
include_all_active=REQ(validator=check_bool, default=False),
include_default=REQ(validator=check_bool, default=False)):
# type: (HttpRequest, UserProfile, bool, bool, bool, bool) -> HttpResponse
streams = do_get_streams(user_profile, include_public=include_public,
include_subscribed=include_subscribed,
include_all_active=include_all_active,
include_default=include_default)
return json_success({"streams": streams})
@has_request_variables
def get_topics_backend(request, user_profile,
stream_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
if stream.realm_id != user_profile.realm_id:
return json_error(_("Invalid stream id"))
recipient = get_recipient(Recipient.STREAM, stream.id)
if not stream.is_public():
if not is_active_subscriber(user_profile=user_profile,
recipient=recipient):
return json_error(_("Invalid stream id"))
result = get_topic_history_for_stream(
user_profile=user_profile,
recipient=recipient,
)
# Our data structure here is a list of tuples of
# (topic name, unread count), and it's reverse chronological,
# so the most recent topic is the first element of the list.
return json_success(dict(topics=result))
@authenticated_json_post_view
@has_request_variables
def json_stream_exists(request, user_profile, stream=REQ(),
autosubscribe=REQ(default=False)):
# type: (HttpRequest, UserProfile, Text, bool) -> HttpResponse
if not valid_stream_name(stream):
return json_error(_("Invalid characters in stream name"))
try:
stream_id = Stream.objects.get(realm=user_profile.realm, name=stream).id
except Stream.DoesNotExist:
stream_id = None
return stream_exists_backend(request, user_profile, stream_id, autosubscribe)
def stream_exists_backend(request, user_profile, stream_id, autosubscribe):
# type: (HttpRequest, UserProfile, int, bool) -> HttpResponse
try:
stream = get_and_validate_stream_by_id(stream_id, user_profile.realm)
except JsonableError:
stream = None
result = {"exists": bool(stream)}
if stream is not None:
recipient = get_recipient(Recipient.STREAM, stream.id)
if autosubscribe:
bulk_add_subscriptions([stream], [user_profile])
result["subscribed"] = is_active_subscriber(
user_profile=user_profile,
recipient=recipient)
return json_success(result) # results are ignored for HEAD requests
return json_response(data=result, status=404)
def get_and_validate_stream_by_id(stream_id, realm):
# type: (int, Realm) -> Stream
try:
stream = Stream.objects.get(pk=stream_id, realm_id=realm.id)
except Stream.DoesNotExist:
raise JsonableError(_("Invalid stream id"))
return stream
@has_request_variables
def json_get_stream_id(request, user_profile, stream=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
stream_id = Stream.objects.get(realm=user_profile.realm, name=stream).id
except Stream.DoesNotExist:
return json_error(_("No such stream name"))
return json_success({'stream_id': stream_id})
def get_subscription_or_die(stream_name, user_profile):
# type: (Text, UserProfile) -> Subscription
stream = get_stream(stream_name, user_profile.realm)
if not stream:
raise JsonableError(_("Invalid stream %s") % (stream_name,))
recipient = get_recipient(Recipient.STREAM, stream.id)
subscription = Subscription.objects.filter(user_profile=user_profile,
recipient=recipient, active=True)
if not subscription.exists():
raise JsonableError(_("Not subscribed to stream %s") % (stream_name,))
return subscription
@authenticated_json_view
@has_request_variables
def json_subscription_property(request, user_profile, subscription_data=REQ(
validator=check_list(
check_dict([("stream", check_string),
("property", check_string),
("value", check_variable_type(
[check_string, check_bool]))])))):
# type: (HttpRequest, UserProfile, List[Dict[str, Any]]) -> HttpResponse
"""
This is the entry point to changing subscription properties. This
is a bulk endpoint: requestors always provide a subscription_data
list containing dictionaries for each stream of interest.
Requests are of the form:
[{"stream": "devel", "property": "in_home_view", "value": False},
{"stream": "devel", "property": "color", "value": "#c2c2c2"}]
"""
if request.method != "POST":
return json_error(_("Invalid verb"))
property_converters = {"color": check_string, "in_home_view": check_bool,
"desktop_notifications": check_bool,
"audible_notifications": check_bool,
"pin_to_top": check_bool}
response_data = []
for change in subscription_data:
stream_name = change["stream"]
property = change["property"]
value = change["value"]
if property not in property_converters:
return json_error(_("Unknown subscription property: %s") % (property,))
sub = get_subscription_or_die(stream_name, user_profile)[0]
property_conversion = property_converters[property](property, value)
if property_conversion:
return json_error(property_conversion)
do_change_subscription_property(user_profile, sub, stream_name,
property, value)
response_data.append({'stream': stream_name,
'property': property,
'value': value})
return json_success({"subscription_data": response_data})
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_2493_1 |
crossvul-python_data_bad_4544_0 | import qrcode
import qrcode.image.svg
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import SuccessURLAllowedHostsMixin
from django.http import HttpResponse
from django.shortcuts import resolve_url
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import (
DeleteView, FormView, ListView, UpdateView, View)
from django_otp import login as otp_login
from django_otp.plugins.otp_totp.models import TOTPDevice
from wagtail_2fa import forms, utils
from wagtail_2fa.mixins import OtpRequiredMixin
class LoginView(SuccessURLAllowedHostsMixin, FormView):
template_name = "wagtail_2fa/otp_form.html"
form_class = forms.TokenForm
redirect_field_name = REDIRECT_FIELD_NAME
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context[self.redirect_field_name] = self.get_redirect_url()
return context
def form_valid(self, form):
otp_login(self.request, self.request.user.otp_device)
return super().form_valid(form)
def get_redirect_url(self):
"""Return the user-originating redirect URL if it's safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name, self.request.GET.get(self.redirect_field_name, "")
)
url_is_safe = is_safe_url(
url=redirect_to,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
return redirect_to if url_is_safe else ""
def get_success_url(self):
url = self.get_redirect_url()
return url or resolve_url(settings.LOGIN_REDIRECT_URL)
class DeviceListView(OtpRequiredMixin, ListView):
template_name = "wagtail_2fa/device_list.html"
# require OTP if configured
if_configured = True
def get_queryset(self):
return TOTPDevice.objects.devices_for_user(self.kwargs['user_id'], confirmed=True)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['user_id'] = int(self.kwargs['user_id'])
return context
class DeviceCreateView(OtpRequiredMixin, FormView):
form_class = forms.DeviceForm
template_name = "wagtail_2fa/device_form.html"
# require OTP if configured
if_configured = True
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["request"] = self.request
kwargs["instance"] = self.device
return kwargs
def form_valid(self, form):
form.save()
utils.delete_unconfirmed_devices(self.request.user)
if not self.request.user.is_verified():
otp_login(self.request, form.instance)
return super().form_valid(form)
def get_success_url(self):
return reverse('wagtail_2fa_device_list', kwargs={'user_id': self.request.user.id})
@cached_property
def device(self):
if self.request.method.lower() == "get":
return utils.new_unconfirmed_device(self.request.user)
else:
return utils.get_unconfirmed_device(self.request.user)
class DeviceUpdateView(OtpRequiredMixin, UpdateView):
form_class = forms.DeviceForm
template_name = "wagtail_2fa/device_form.html"
def get_queryset(self):
return TOTPDevice.objects.devices_for_user(self.request.user, confirmed=True)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def get_success_url(self):
return reverse('wagtail_2fa_device_list', kwargs={'user_id': self.request.user.id})
class DeviceDeleteView(OtpRequiredMixin, DeleteView):
template_name = "wagtail_2fa/device_confirm_delete.html"
def get_queryset(self):
device = TOTPDevice.objects.get(**self.kwargs)
return TOTPDevice.objects.devices_for_user(device.user, confirmed=True)
def get_success_url(self):
return reverse('wagtail_2fa_device_list', kwargs={'user_id': self.request.POST.get('user_id')})
class DeviceQRCodeView(OtpRequiredMixin, View):
# require OTP if configured
if_configured = True
def get(self, request):
device = utils.get_unconfirmed_device(self.request.user)
img = qrcode.make(device.config_url, image_factory=qrcode.image.svg.SvgImage)
response = HttpResponse(content_type="image/svg+xml")
img.save(response)
return response
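    # Note: qrcode.make renders the device's provisioning URI (an
    # otpauth:// URL from django-otp) as an SVG that authenticator
    # apps can scan during setup.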
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4544_0 |
crossvul-python_data_good_4366_6 | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
from urllib.parse import quote, urlparse
import uuid
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.log import app_log
from jupyterhub.handlers import BaseHandler
from jupyterhub.auth import Authenticator
from jupyterhub.utils import url_path_join
from traitlets import Unicode, Bool, List, Dict, default, observe
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
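# For example (illustrative values only):
#   guess_callback_uri('https', 'hub.example.com', '/hub/')
#   -> 'https://hub.example.com/hub/oauth_callback'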
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
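# Round-trip sketch (illustrative payload; the keys mirror get_state below):
#   s = _serialize_state({'state_id': 'abc123', 'next_url': '/hub/home'})
#   _deserialize_state(s)  # -> {'state_id': 'abc123', 'next_url': '/hub/home'}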
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
    Typically subclasses will need to configure the provider-specific OAuth
    URLs on the Authenticator, which the properties below read back.
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
        _login_user_pre_08 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4366_6 |
crossvul-python_data_good_4366_5 | """
Custom Authenticator to use Google OAuth with JupyterHub.
Derived from the GitHub OAuth authenticator.
"""
import os
import json
import urllib.parse
from tornado import gen
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from tornado.auth import GoogleOAuth2Mixin
from tornado.web import HTTPError
from traitlets import Dict, Unicode, List, default, validate, observe
from jupyterhub.crypto import decrypt, EncryptionUnavailable, InvalidToken
from jupyterhub.auth import LocalAuthenticator
from jupyterhub.utils import url_path_join
from .oauth2 import OAuthLoginHandler, OAuthCallbackHandler, OAuthenticator
def check_user_in_groups(member_groups, allowed_groups):
# Check if user is a member of any group in the allowed groups
if any(g in member_groups for g in allowed_groups):
return True # user _is_ in group
else:
return False
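# For example, check_user_in_groups(['eng', 'ops'], ['admins', 'eng'])
# returns True, since 'eng' appears in both lists.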
class GoogleOAuthenticator(OAuthenticator, GoogleOAuth2Mixin):
_deprecated_oauth_aliases = {
"google_group_whitelist": ("allowed_google_groups", "0.12.0"),
**OAuthenticator._deprecated_oauth_aliases,
}
google_api_url = Unicode("https://www.googleapis.com", config=True)
@default('google_api_url')
def _google_api_url(self):
"""get default google apis url from env"""
google_api_url = os.getenv('GOOGLE_API_URL')
# default to googleapis.com
if not google_api_url:
google_api_url = 'https://www.googleapis.com'
return google_api_url
@default('scope')
def _scope_default(self):
return ['openid', 'email']
@default("authorize_url")
def _authorize_url_default(self):
return "https://accounts.google.com/o/oauth2/v2/auth"
@default("token_url")
def _token_url_default(self):
return "%s/oauth2/v4/token" % (self.google_api_url)
google_service_account_keys = Dict(
Unicode(),
help="Service account keys to use with each domain, see https://developers.google.com/admin-sdk/directory/v1/guides/delegation"
).tag(config=True)
gsuite_administrator = Dict(
Unicode(),
help="Username of a G Suite Administrator for the service account to act as"
).tag(config=True)
google_group_whitelist = Dict(help="Deprecated, use `GoogleOAuthenticator.allowed_google_groups`", config=True,)
allowed_google_groups = Dict(
List(Unicode()),
help="Automatically allow members of selected groups"
).tag(config=True)
admin_google_groups = Dict(
List(Unicode()),
help="Groups whose members should have Jupyterhub admin privileges"
).tag(config=True)
user_info_url = Unicode(
"https://www.googleapis.com/oauth2/v1/userinfo", config=True
)
hosted_domain = List(
Unicode(),
config=True,
help="""List of domains used to restrict sign-in, e.g. mycollege.edu""",
)
@default('hosted_domain')
def _hosted_domain_from_env(self):
domains = []
for domain in os.environ.get('HOSTED_DOMAIN', '').split(';'):
if domain:
# check falsy to avoid trailing separators
# adding empty domains
domains.append(domain)
return domains
@validate('hosted_domain')
def _cast_hosted_domain(self, proposal):
"""handle backward-compatibility with hosted_domain is a single domain as a string"""
if isinstance(proposal.value, str):
# pre-0.9 hosted_domain was a string
# set it to a single item list
# (or if it's empty, an empty list)
if proposal.value == '':
return []
return [proposal.value]
return proposal.value
login_service = Unicode(
os.environ.get('LOGIN_SERVICE', 'Google'),
config=True,
help="""Google Apps hosted domain string, e.g. My College""",
)
async def authenticate(self, handler, data=None, google_groups=None):
code = handler.get_argument("code")
body = urllib.parse.urlencode(
dict(
code=code,
redirect_uri=self.get_callback_url(handler),
client_id=self.client_id,
client_secret=self.client_secret,
grant_type="authorization_code",
)
)
http_client = AsyncHTTPClient()
response = await http_client.fetch(
self.token_url,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
body=body,
)
user = json.loads(response.body.decode("utf-8", "replace"))
access_token = str(user['access_token'])
refresh_token = user.get('refresh_token', None)
response = await http_client.fetch(
self.user_info_url + '?access_token=' + access_token
)
if not response:
handler.clear_all_cookies()
raise HTTPError(500, 'Google authentication failed')
bodyjs = json.loads(response.body.decode())
user_email = username = bodyjs['email']
user_email_domain = user_email.split('@')[1]
if not bodyjs['verified_email']:
self.log.warning("Google OAuth unverified email attempt: %s", user_email)
raise HTTPError(403, "Google email {} not verified".format(user_email))
if self.hosted_domain:
if user_email_domain not in self.hosted_domain:
self.log.warning(
"Google OAuth unauthorized domain attempt: %s", user_email
)
raise HTTPError(
403,
"Google account domain @{} not authorized.".format(
user_email_domain
),
)
if len(self.hosted_domain) == 1:
# unambiguous domain, use only base name
username = user_email.split('@')[0]
if refresh_token is None:
self.log.debug("Refresh token was empty, will try to pull refresh_token from previous auth_state")
user = handler.find_user(username)
if user:
self.log.debug("encrypted_auth_state was found, will try to decrypt and pull refresh_token from it")
try:
encrypted = user.encrypted_auth_state
auth_state = await decrypt(encrypted)
refresh_token = auth_state.get('refresh_token')
except (ValueError, InvalidToken, EncryptionUnavailable) as e:
self.log.warning(
"Failed to retrieve encrypted auth_state for %s because %s",
username,
e,
)
user_info = {
'name': username,
'auth_state': {
'access_token': access_token,
'refresh_token': refresh_token,
'google_user': bodyjs
}
}
if self.admin_google_groups or self.allowed_google_groups:
user_info = await self._add_google_groups_info(user_info, google_groups)
return user_info
def _service_client_credentials(self, scopes, user_email_domain):
"""
Return a configured service client credentials for the API.
"""
try:
from google.oauth2 import service_account
except:
raise ImportError(
"Could not import google.oauth2's service_account,"
"you may need to run pip install oauthenticator[googlegroups] or not declare google groups"
)
gsuite_administrator_email = "{}@{}".format(self.gsuite_administrator[user_email_domain], user_email_domain)
self.log.debug("scopes are %s, user_email_domain is %s", scopes, user_email_domain)
credentials = service_account.Credentials.from_service_account_file(
self.google_service_account_keys[user_email_domain],
scopes=scopes
)
credentials = credentials.with_subject(gsuite_administrator_email)
return credentials
def _service_client(self, service_name, service_version, credentials, http=None):
"""
Return a configured service client for the API.
"""
try:
from googleapiclient.discovery import build
except:
raise ImportError(
"Could not import googleapiclient.discovery's build,"
"you may need to run pip install oauthenticator[googlegroups] or not declare google groups"
)
self.log.debug("service_name is %s, service_version is %s", service_name, service_version)
return build(
serviceName=service_name,
version=service_version,
credentials=credentials,
cache_discovery=False,
http=http)
async def _google_groups_for_user(self, user_email, credentials, http=None):
"""
Return google groups a given user is a member of
"""
service = self._service_client(
service_name='admin',
service_version='directory_v1',
credentials=credentials,
http=http)
results = service.groups().list(userKey=user_email).execute()
results = [ g['email'].split('@')[0] for g in results.get('groups', [{'email': None}]) ]
self.log.debug("user_email %s is a member of %s", user_email, results)
return results
async def _add_google_groups_info(self, user_info, google_groups=None):
user_email_domain=user_info['auth_state']['google_user']['hd']
user_email=user_info['auth_state']['google_user']['email']
if google_groups is None:
credentials = self._service_client_credentials(
scopes=['%s/auth/admin.directory.group.readonly' % (self.google_api_url)],
user_email_domain=user_email_domain)
google_groups = await self._google_groups_for_user(
user_email=user_email,
credentials=credentials)
user_info['auth_state']['google_user']['google_groups'] = google_groups
# Check if user is a member of any admin groups.
if self.admin_google_groups:
is_admin = check_user_in_groups(google_groups, self.admin_google_groups[user_email_domain])
# Check if user is a member of any allowed groups.
user_in_group = check_user_in_groups(google_groups, self.allowed_google_groups[user_email_domain])
if self.admin_google_groups and (is_admin or user_in_group):
user_info['admin'] = is_admin
return user_info
elif user_in_group:
return user_info
else:
return None
class LocalGoogleOAuthenticator(LocalAuthenticator, GoogleOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| ./CrossVul/dataset_final_sorted/CWE-863/py/good_4366_5 |
crossvul-python_data_bad_4126_1 | #!/usr/bin/env python
# Copyright 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
settings.py
<Author>
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
January 11, 2017
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
A central location for TUF configuration settings. Example options include
setting the destination of temporary files and downloaded content, the maximum
length of downloaded metadata (unknown file attributes), and download
behavior.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# Set a directory that should be used for all temporary files. If this
# is None, then the system default will be used. The system default
# will also be used if a directory path set here is invalid or
# unusable.
temporary_directory = None
# Set a local directory to store metadata that is requested from mirrors. This
# directory contains subdirectories for different repositories, where each
# subdirectory contains a different set of metadata. For example:
# tuf.settings.repositories_directory = /tmp/repositories. The root file for a
# repository named 'django_repo' can be found at:
# /tmp/repositories/django_repo/metadata/current/root.METADATA_EXTENSION
repositories_directory = None
# The 'log.py' module manages TUF's logging system. Users have the option to
# enable/disable logging to a file via 'ENABLE_FILE_LOGGING', or
# tuf.log.enable_file_logging() and tuf.log.disable_file_logging().
ENABLE_FILE_LOGGING = False
# If file logging is enabled via 'ENABLE_FILE_LOGGING', TUF log messages will
# be saved to 'LOG_FILENAME'
LOG_FILENAME = 'tuf.log'
# Since the timestamp role does not have signed metadata about itself, we set a
# default but sane upper bound for the number of bytes required to download it.
DEFAULT_TIMESTAMP_REQUIRED_LENGTH = 16384 #bytes
# The Root role may be updated without knowing its version if top-level
# metadata cannot be safely downloaded (e.g., keys may have been revoked, thus
# requiring a new Root file that includes the updated keys). Set a default
# upper bound for the maximum total bytes that may be downloaded for Root
# metadata.
DEFAULT_ROOT_REQUIRED_LENGTH = 512000 #bytes
# Set a default, but sane, upper bound for the number of bytes required to
# download Snapshot metadata.
DEFAULT_SNAPSHOT_REQUIRED_LENGTH = 2000000 #bytes
# Set a default, but sane, upper bound for the number of bytes required to
# download Targets metadata.
DEFAULT_TARGETS_REQUIRED_LENGTH = 5000000 #bytes
# Set a timeout value in seconds (float) for non-blocking socket operations.
SOCKET_TIMEOUT = 4 #seconds
# The maximum chunk of data, in bytes, we would download in every round.
CHUNK_SIZE = 400000 #bytes
# The minimum average download speed (bytes/second) that must be met to
# avoid being considered as a slow retrieval attack.
MIN_AVERAGE_DOWNLOAD_SPEED = 50 #bytes/second
# By default, limit number of delegatees we visit for any target.
MAX_NUMBER_OF_DELEGATIONS = 2**5
# This configuration is for indicating how consistent files should be created.
# There are two options: "copy" and "hard_link". For "copy", the consistent
# file with be a copy of root.json. This approach will require the most disk
# space out of the two options. For "hard_link", the latest root.json will be
# a hard link to 2.root.json (for example). This approach is more efficient in
# terms of disk space usage. By default, we use 'copy'.
CONSISTENT_METHOD = 'copy'
# A setting for the instances where a default hashing algorithm is needed.
# This setting is currently used to calculate the path hash prefixes of hashed
# bin delegations, and digests of targets filepaths. The other instances
# (e.g., digest of files) that require a hashing algorithm rely on settings in
# the securesystemslib external library.
DEFAULT_HASH_ALGORITHM = 'sha256'
# The client's update procedure (contained within a while-loop) can potentially
# hog the CPU. The following setting can be used to force the update sequence
# to suspend execution for a specified amount of time. See
# theupdateframework/tuf/issue#338.
SLEEP_BEFORE_ROUND = None
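# A minimal sketch of overriding these settings at runtime (attribute names
# as defined above; the values are illustrative only):
#
#   import tuf.settings
#   tuf.settings.temporary_directory = '/var/tmp/tuf'
#   tuf.settings.CONSISTENT_METHOD = 'hard_link'
#   tuf.settings.SLEEP_BEFORE_ROUND = 0.05  # seconds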
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4126_1 |
crossvul-python_data_bad_4098_0 | from collections import defaultdict
from datetime import datetime
from debts import settle
from flask import current_app, g
from flask_sqlalchemy import BaseQuery, SQLAlchemy
from itsdangerous import (
BadSignature,
SignatureExpired,
TimedJSONWebSignatureSerializer,
URLSafeSerializer,
)
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.sql import func
from sqlalchemy_continuum import make_versioned, version_class
from sqlalchemy_continuum.plugins import FlaskPlugin
from werkzeug.security import generate_password_hash
from ihatemoney.patch_sqlalchemy_continuum import PatchedBuilder
from ihatemoney.versioning import (
ConditionalVersioningManager,
LoggingMode,
get_ip_if_allowed,
version_privacy_predicate,
)
make_versioned(
user_cls=None,
manager=ConditionalVersioningManager(
# Conditionally Disable the versioning based on each
# project's privacy preferences
tracking_predicate=version_privacy_predicate,
# Patch in a fix to a SQLAchemy-Continuum Bug.
# See patch_sqlalchemy_continuum.py
builder=PatchedBuilder(),
),
plugins=[
FlaskPlugin(
# Redirect to our own function, which respects user preferences
# on IP address collection
remote_addr_factory=get_ip_if_allowed,
# Suppress the plugin's attempt to grab a user id,
# which imports the flask_login module (causing an error)
current_user_id_factory=lambda: None,
)
],
)
db = SQLAlchemy()
class Project(db.Model):
class ProjectQuery(BaseQuery):
def get_by_name(self, name):
return Project.query.filter(Project.name == name).one()
# Direct SQLAlchemy-Continuum to track changes to this model
__versioned__ = {}
id = db.Column(db.String(64), primary_key=True)
name = db.Column(db.UnicodeText)
password = db.Column(db.String(128))
contact_email = db.Column(db.String(128))
logging_preference = db.Column(
db.Enum(LoggingMode),
default=LoggingMode.default(),
nullable=False,
server_default=LoggingMode.default().name,
)
members = db.relationship("Person", backref="project")
query_class = ProjectQuery
default_currency = db.Column(db.String(3))
@property
def _to_serialize(self):
obj = {
"id": self.id,
"name": self.name,
"contact_email": self.contact_email,
"logging_preference": self.logging_preference.value,
"members": [],
"default_currency": self.default_currency,
}
balance = self.balance
for member in self.members:
member_obj = member._to_serialize
member_obj["balance"] = balance.get(member.id, 0)
obj["members"].append(member_obj)
return obj
@property
def active_members(self):
return [m for m in self.members if m.activated]
@property
def balance(self):
balances, should_pay, should_receive = (defaultdict(int) for time in (1, 2, 3))
# for each person
for person in self.members:
# get the list of bills he has to pay
bills = Bill.query.options(orm.subqueryload(Bill.owers)).filter(
Bill.owers.contains(person)
)
for bill in bills.all():
if person != bill.payer:
share = bill.pay_each() * person.weight
should_pay[person] += share
should_receive[bill.payer] += share
for person in self.members:
balance = should_receive[person] - should_pay[person]
balances[person.id] = balance
return balances
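    # Sign convention sketch (hypothetical numbers): if alice pays a 30
    # bill shared equally with bob, the returned dict maps alice's id to
    # +15.0 (she is owed money) and bob's id to -15.0 (he owes money).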
@property
def members_stats(self):
"""Compute what each member has paid
:return: one stat dict per member
        :rtype: list
"""
return [
{
"member": member,
"paid": sum(
[
bill.converted_amount
for bill in self.get_member_bills(member.id).all()
]
),
"spent": sum(
[
bill.pay_each() * member.weight
for bill in self.get_bills().all()
if member in bill.owers
]
),
"balance": self.balance[member.id],
}
for member in self.active_members
]
@property
def monthly_stats(self):
"""Compute expenses by month
:return: a dict of years mapping to a dict of months mapping to the amount
        :rtype: dict
"""
monthly = defaultdict(lambda: defaultdict(float))
for bill in self.get_bills().all():
monthly[bill.date.year][bill.date.month] += bill.converted_amount
return monthly
@property
def uses_weights(self):
return len([i for i in self.members if i.weight != 1]) > 0
def get_transactions_to_settle_bill(self, pretty_output=False):
"""Return a list of transactions that could be made to settle the bill"""
def prettify(transactions, pretty_output):
""" Return pretty transactions
"""
if not pretty_output:
return transactions
pretty_transactions = []
for transaction in transactions:
pretty_transactions.append(
{
"ower": transaction["ower"].name,
"receiver": transaction["receiver"].name,
"amount": round(transaction["amount"], 2),
}
)
return pretty_transactions
# cache value for better performance
members = {person.id: person for person in self.members}
settle_plan = settle(self.balance.items()) or []
transactions = [
{
"ower": members[ower_id],
"receiver": members[receiver_id],
"amount": amount,
}
for ower_id, amount, receiver_id in settle_plan
]
return prettify(transactions, pretty_output)
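    # Illustrative output shape (hypothetical members and amounts):
    #   [{'ower': <Person bob>, 'receiver': <Person alice>, 'amount': 15.0}]
    # or, with pretty_output=True:
    #   [{'ower': 'bob', 'receiver': 'alice', 'amount': 15.0}]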
def exactmatch(self, credit, debts):
"""Recursively try and find subsets of 'debts' whose sum is equal to credit"""
if not debts:
return None
if debts[0]["balance"] > credit:
return self.exactmatch(credit, debts[1:])
elif debts[0]["balance"] == credit:
return [debts[0]]
else:
match = self.exactmatch(credit - debts[0]["balance"], debts[1:])
if match:
match.append(debts[0])
else:
match = self.exactmatch(credit, debts[1:])
return match
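    # Worked example (hypothetical balances): with credit=40 and
    # debts=[{'balance': 25}, {'balance': 15}, {'balance': 10}], the
    # recursion matches 25 + 15 == 40 and returns those two entries.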
def has_bills(self):
"""return if the project do have bills or not"""
return self.get_bills().count() > 0
def get_bills(self):
"""Return the list of bills related to this project"""
return (
Bill.query.join(Person, Project)
.filter(Bill.payer_id == Person.id)
.filter(Person.project_id == Project.id)
.filter(Project.id == self.id)
.order_by(Bill.date.desc())
.order_by(Bill.creation_date.desc())
.order_by(Bill.id.desc())
)
def get_member_bills(self, member_id):
"""Return the list of bills related to a specific member"""
return (
Bill.query.join(Person, Project)
.filter(Bill.payer_id == Person.id)
.filter(Person.project_id == Project.id)
.filter(Person.id == member_id)
.filter(Project.id == self.id)
.order_by(Bill.date.desc())
.order_by(Bill.id.desc())
)
def get_pretty_bills(self, export_format="json"):
"""Return a list of project's bills with pretty formatting"""
bills = self.get_bills()
pretty_bills = []
for bill in bills:
if export_format == "json":
owers = [ower.name for ower in bill.owers]
else:
owers = ", ".join([ower.name for ower in bill.owers])
pretty_bills.append(
{
"what": bill.what,
"amount": round(bill.amount, 2),
"date": str(bill.date),
"payer_name": Person.query.get(bill.payer_id).name,
"payer_weight": Person.query.get(bill.payer_id).weight,
"owers": owers,
}
)
return pretty_bills
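    # Sketch of a single entry returned by get_pretty_bills() above, with
    # illustrative values (with export_format="json", "owers" is a list of
    # names; otherwise it is a comma-separated string):
    #
    #   {"what": "Beer !", "amount": 20.0, "date": "2020-01-01",
    #    "payer_name": "Alice", "payer_weight": 1.0,
    #    "owers": ["Amina", "Alice"]}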
def remove_member(self, member_id):
"""Remove a member from the project.
        If the member is not bound to any bill, they are deleted; otherwise
        they are only deactivated.
        This method returns the Person object that was deleted or
        deactivated, or None if no such member exists.
"""
try:
person = Person.query.get(member_id, self)
except orm.exc.NoResultFound:
return None
if not person.has_bills():
db.session.delete(person)
db.session.commit()
else:
person.activated = False
db.session.commit()
return person
def remove_project(self):
db.session.delete(self)
db.session.commit()
def generate_token(self, expiration=0):
"""Generate a timed and serialized JsonWebToken
:param expiration: Token expiration time (in seconds)
"""
if expiration:
serializer = TimedJSONWebSignatureSerializer(
current_app.config["SECRET_KEY"], expiration
)
token = serializer.dumps({"project_id": self.id}).decode("utf-8")
else:
serializer = URLSafeSerializer(current_app.config["SECRET_KEY"])
token = serializer.dumps({"project_id": self.id})
return token
@staticmethod
def verify_token(token, token_type="timed_token"):
"""Return the project id associated to the provided token,
None if the provided token is expired or not valid.
:param token: Serialized TimedJsonWebToken
"""
if token_type == "timed_token":
serializer = TimedJSONWebSignatureSerializer(
current_app.config["SECRET_KEY"]
)
else:
serializer = URLSafeSerializer(current_app.config["SECRET_KEY"])
try:
data = serializer.loads(token)
except SignatureExpired:
return None
except BadSignature:
return None
return data["project_id"]
def __str__(self):
return self.name
def __repr__(self):
return f"<Project {self.name}>"
@staticmethod
def create_demo_project():
project = Project(
id="demo",
name="demonstration",
password=generate_password_hash("demo"),
contact_email="demo@notmyidea.org",
default_currency="EUR",
)
db.session.add(project)
db.session.commit()
members = {}
for name in ("Amina", "Georg", "Alice"):
person = Person()
person.name = name
person.project = project
person.weight = 1
db.session.add(person)
members[name] = person
db.session.commit()
operations = (
("Georg", 200, ("Amina", "Georg", "Alice"), "Food shopping"),
("Alice", 20, ("Amina", "Alice"), "Beer !"),
("Amina", 50, ("Amina", "Alice", "Georg"), "AMAP"),
)
for (payer, amount, owers, subject) in operations:
bill = Bill()
bill.payer_id = members[payer].id
bill.what = subject
bill.owers = [members[name] for name in owers]
bill.amount = amount
bill.original_currency = "EUR"
bill.converted_amount = amount
db.session.add(bill)
db.session.commit()
return project
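# A minimal usage sketch for Project (assumes an application context and an
# initialized database):
#
#   project = Project.create_demo_project()
#   project.balance  # {person_id: balance, ...}
#   project.get_transactions_to_settle_bill(pretty_output=True)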
class Person(db.Model):
class PersonQuery(BaseQuery):
def get_by_name(self, name, project):
return (
Person.query.filter(Person.name == name)
.filter(Project.id == project.id)
.one()
)
def get(self, id, project=None):
if not project:
project = g.project
return (
Person.query.filter(Person.id == id)
.filter(Project.id == project.id)
.one()
)
query_class = PersonQuery
# Direct SQLAlchemy-Continuum to track changes to this model
__versioned__ = {}
__table_args__ = {"sqlite_autoincrement": True}
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.String(64), db.ForeignKey("project.id"))
bills = db.relationship("Bill", backref="payer")
name = db.Column(db.UnicodeText)
weight = db.Column(db.Float, default=1)
activated = db.Column(db.Boolean, default=True)
@property
def _to_serialize(self):
return {
"id": self.id,
"name": self.name,
"weight": self.weight,
"activated": self.activated,
}
def has_bills(self):
"""return if the user do have bills or not"""
bills_as_ower_number = (
db.session.query(billowers)
.filter(billowers.columns.get("person_id") == self.id)
.count()
)
return bills_as_ower_number != 0 or len(self.bills) != 0
def __str__(self):
return self.name
def __repr__(self):
return f"<Person {self.name} for project {self.project.name}>"
# We need to manually define a join table for m2m relations
billowers = db.Table(
"billowers",
db.Column("bill_id", db.Integer, db.ForeignKey("bill.id"), primary_key=True),
db.Column("person_id", db.Integer, db.ForeignKey("person.id"), primary_key=True),
sqlite_autoincrement=True,
)
class Bill(db.Model):
class BillQuery(BaseQuery):
def get(self, project, id):
try:
return (
self.join(Person, Project)
.filter(Bill.payer_id == Person.id)
.filter(Person.project_id == Project.id)
.filter(Project.id == project.id)
.filter(Bill.id == id)
.one()
)
except orm.exc.NoResultFound:
return None
def delete(self, project, id):
bill = self.get(project, id)
if bill:
db.session.delete(bill)
return bill
query_class = BillQuery
# Direct SQLAlchemy-Continuum to track changes to this model
__versioned__ = {}
__table_args__ = {"sqlite_autoincrement": True}
id = db.Column(db.Integer, primary_key=True)
payer_id = db.Column(db.Integer, db.ForeignKey("person.id"))
owers = db.relationship(Person, secondary=billowers)
amount = db.Column(db.Float)
date = db.Column(db.Date, default=datetime.now)
creation_date = db.Column(db.Date, default=datetime.now)
what = db.Column(db.UnicodeText)
external_link = db.Column(db.UnicodeText)
original_currency = db.Column(db.String(3))
converted_amount = db.Column(db.Float)
archive = db.Column(db.Integer, db.ForeignKey("archive.id"))
@property
def _to_serialize(self):
return {
"id": self.id,
"payer_id": self.payer_id,
"owers": self.owers,
"amount": self.amount,
"date": self.date,
"creation_date": self.creation_date,
"what": self.what,
"external_link": self.external_link,
"original_currency": self.original_currency,
"converted_amount": self.converted_amount,
}
def pay_each_default(self, amount):
"""Compute what each share has to pay"""
if self.owers:
weights = (
db.session.query(func.sum(Person.weight))
.join(billowers, Bill)
.filter(Bill.id == self.id)
).scalar()
return amount / weights
else:
return 0
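    # Worked example for pay_each_default() above: for an amount of 60.0 owed
    # by two owers with weights 1 and 2, the summed weight is 3, so the method
    # returns 20.0 per weight unit; the weight-2 ower's share is 40.0 and the
    # weight-1 ower's share is 20.0.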
def __str__(self):
return self.what
def pay_each(self):
return self.pay_each_default(self.converted_amount)
def __repr__(self):
return (
f"<Bill of {self.amount} from {self.payer} for "
f"{', '.join([o.name for o in self.owers])}>"
)
class Archive(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.String(64), db.ForeignKey("project.id"))
name = db.Column(db.UnicodeText)
@property
def start_date(self):
pass
@property
def end_date(self):
pass
def __repr__(self):
return "<Archive>"
sqlalchemy.orm.configure_mappers()
PersonVersion = version_class(Person)
ProjectVersion = version_class(Project)
BillVersion = version_class(Bill)
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4098_0 |
crossvul-python_data_bad_4126_0 | #!/usr/bin/env python
# Copyright 2012 - 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
updater.py
<Author>
Geremy Condra
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
July 2012. Based on a previous version of this module. (VLAD)
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
'updater.py' is intended to be the only TUF module that software update
systems need to utilize. It provides a single class representing an
updater that includes methods to download, install, and verify
metadata/target files in a secure manner. Importing 'updater.py' and
instantiating its main class is all that is required by the client prior
to a TUF update request. The importation and instantiation steps allow
TUF to load all of the required metadata files and set the repository mirror
information.
An overview of the update process:
1. The software update system instructs TUF to check for updates.
2. TUF downloads and verifies timestamp.json.
3. If timestamp.json indicates that snapshot.json has changed, TUF downloads
and verifies snapshot.json.
4. TUF determines which metadata files listed in snapshot.json differ from
those described in the last snapshot.json that TUF has seen. If root.json
has changed, the update process starts over using the new root.json.
5. TUF provides the software update system with a list of available files
according to targets.json.
6. The software update system instructs TUF to download a specific target
file.
7. TUF downloads and verifies the file and then makes the file available to
the software update system.
<Example Client>
# The client first imports the 'updater.py' module, the only module the
# client is required to import. The client will utilize a single class
# from this module.
import tuf.client.updater
# The only other module the client interacts with is 'tuf.settings'. The
# client accesses this module solely to set the repository directory.
# This directory will hold the files downloaded from a remote repository.
tuf.settings.repositories_directory = 'local-repository'
# Next, the client creates a dictionary object containing the repository
# mirrors. The client may download content from any one of these mirrors.
# In the example below, a single mirror named 'mirror1' is defined. The
# mirror is located at 'http://localhost:8001', and all of the metadata
# and targets files can be found in the 'metadata' and 'targets' directory,
# respectively. If the client wishes to only download target files from
# specific directories on the mirror, the 'confined_target_dirs' field
# should be set. In the example, the client has chosen '', which is
# interpreted as no confinement. In other words, the client can download
# targets from any directory or subdirectories. If the client had chosen
# 'targets1/', they would have been confined to the '/targets/targets1/'
# directory on the 'http://localhost:8001' mirror.
repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}}
# The updater may now be instantiated. The Updater class of 'updater.py'
# is called with two arguments. The first argument assigns a name to this
# particular updater and the second argument the repository mirrors defined
# above.
updater = tuf.client.updater.Updater('updater', repository_mirrors)
# The client next calls the refresh() method to ensure it has the latest
# copies of the metadata files.
updater.refresh()
# get_one_valid_targetinfo() updates role metadata when required. In other
# words, if the client doesn't possess the metadata that lists 'LICENSE.txt',
# get_one_valid_targetinfo() will try to fetch / update it.
target = updater.get_one_valid_targetinfo('LICENSE.txt')
# Determine if 'target' has changed since the client's last refresh(). A
# target is considered updated if it does not exist in
# 'destination_directory' (current directory) or the target located there has
# changed.
destination_directory = '.'
updated_target = updater.updated_targets([target], destination_directory)
for target in updated_target:
updater.download_target(target, destination_directory)
# Client code here may also reference target information (including
# 'custom') by directly accessing the dictionary entries of the target.
# The 'custom' entry is additional file information explicitly set by the
# remote repository.
target_path = target['filepath']
target_length = target['fileinfo']['length']
target_hashes = target['fileinfo']['hashes']
target_custom_data = target['fileinfo']['custom']
"""
# Help with Python 3 compatibility, where print is a function rather than a
# statement, an implicit relative import is invalid, and the '/' operator
# performs true division. Example: print 'hello world' raises a 'SyntaxError'.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import logging
import os
import shutil
import time
import fnmatch
import copy
import warnings
import tuf
import tuf.download
import tuf.formats
import tuf.settings
import tuf.keydb
import tuf.log
import tuf.mirrors
import tuf.roledb
import tuf.sig
import tuf.exceptions
import securesystemslib.hash
import securesystemslib.keys
import securesystemslib.util
import six
import iso8601
# The Timestamp role does not have signed metadata about it; otherwise we
# would need an infinite regress of metadata. Therefore, we use some
# default, but sane, upper file length for its metadata.
DEFAULT_TIMESTAMP_UPPERLENGTH = tuf.settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH
# The Root role may be updated without knowing its version number if
# top-level metadata cannot be safely downloaded (e.g., keys may have been
# revoked, thus requiring a new Root file that includes the updated keys)
# and 'unsafely_update_root_if_necessary' is True.
# We use some default, but sane, upper file length for its metadata.
DEFAULT_ROOT_UPPERLENGTH = tuf.settings.DEFAULT_ROOT_REQUIRED_LENGTH
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.client.updater')
# Disable 'iso8601' logger messages to prevent 'iso8601' from clogging the
# log file.
iso8601_logger = logging.getLogger('iso8601')
iso8601_logger.disabled = True
class MultiRepoUpdater(object):
"""
<Purpose>
Provide a way for clients to request a target file from multiple
repositories. Which repositories to query is determined by the map
    file (i.e., map.json).
See TAP 4 for more information on the map file and how to request updates
from multiple repositories. TAP 4 describes how users may specify that a
particular threshold of repositories be used for some targets, while a
different threshold of repositories be used for others.
<Arguments>
map_file:
The path of the map file. The map file is needed to determine which
repositories to query given a target file.
<Exceptions>
securesystemslib.exceptions.FormatError, if the map file is improperly
formatted.
tuf.exceptions.Error, if the map file cannot be loaded.
<Side Effects>
None.
<Returns>
None.
"""
def __init__(self, map_file):
# Is 'map_file' a path? If not, raise
# 'securesystemslib.exceptions.FormatError'. The actual content of the map
# file is validated later on in this method.
securesystemslib.formats.PATH_SCHEMA.check_match(map_file)
# A dictionary mapping repositories to TUF updaters.
self.repository_names_to_updaters = {}
try:
# The map file dictionary that associates targets with repositories.
self.map_file = securesystemslib.util.load_json_file(map_file)
except (securesystemslib.exceptions.Error, IOError) as e:
raise tuf.exceptions.Error('Cannot load the map file: ' + str(e))
# Raise securesystemslib.exceptions.FormatError if the map file is
# improperly formatted.
tuf.formats.MAPFILE_SCHEMA.check_match(self.map_file)
# Save the "repositories" entry of the map file, with the following
# example format:
#
# "repositories": {
# "Django": ["https://djangoproject.com/"],
# "PyPI": ["https://pypi.python.org/"]
# }
self.repository_names_to_mirrors = self.map_file['repositories']
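  # An illustrative map file (field values are examples only; see TAP 4 for
  # the authoritative format):
  #
  #   {
  #     "repositories": {
  #       "Django": ["https://djangoproject.com/"],
  #       "PyPI": ["https://pypi.python.org/"]
  #     },
  #     "mapping": [
  #       {"paths": ["django/*"], "repositories": ["Django"],
  #        "terminating": true, "threshold": 1},
  #       {"paths": ["*"], "repositories": ["Django", "PyPI"],
  #        "terminating": true, "threshold": 2}
  #     ]
  #   }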
def get_valid_targetinfo(self, target_filename, match_custom_field=True):
"""
<Purpose>
Get valid targetinfo, if any, for the given 'target_filename'. The map
file controls the targetinfo returned (see TAP 4). Return a dict of the
form {updater1: targetinfo, updater2: targetinfo, ...}, where the dict
keys are updater objects, and the dict values the matching targetinfo for
'target_filename'.
<Arguments>
target_filename:
The relative path of the target file to update.
match_custom_field:
Boolean that indicates whether the optional custom field in targetinfo
should match across the targetinfo provided by the threshold of
repositories.
<Exceptions>
tuf.exceptions.FormatError, if the argument is improperly formatted.
tuf.exceptions.Error, if the required local metadata directory or the
Root file does not exist.
tuf.exceptions.UnknownTargetError, if the repositories in the map file do
not agree on the target, or none of them have signed for the target.
<Side Effects>
None.
<Returns>
A dict of the form: {updater1: targetinfo, updater2: targetinfo, ...}.
The targetinfo (conformant with tuf.formats.TARGETINFO_SCHEMA) is for
'target_filename'.
"""
# Is the argument properly formatted? If not, raise
# 'tuf.exceptions.FormatError'.
tuf.formats.RELPATH_SCHEMA.check_match(target_filename)
# TAP 4 requires that the following attributes be present in mappings:
# "paths", "repositories", "terminating", and "threshold".
tuf.formats.MAPPING_SCHEMA.check_match(self.map_file['mapping'])
# Set the top-level directory containing the metadata for each repository.
repositories_directory = tuf.settings.repositories_directory
# Verify that the required local directories exist for each repository.
self._verify_metadata_directories(repositories_directory)
# Iterate mappings.
# [{"paths": [], "repositories": [], "terminating": Boolean, "threshold":
# NUM}, ...]
for mapping in self.map_file['mapping']:
logger.debug('Interrogating mappings..' + repr(mapping))
if not self._target_matches_path_pattern(
target_filename, mapping['paths']):
# The mapping is irrelevant to the target file. Try the next one, if
# any.
continue
# The mapping is relevant to the target...
else:
# Do the repositories in the mapping provide a threshold of matching
# targetinfo?
valid_targetinfo = self._matching_targetinfo(target_filename,
mapping, match_custom_field)
if valid_targetinfo:
return valid_targetinfo
else:
          # If we are here, the mapping matched the target path, but either
          # the target was missing from all repositories in this mapping, or
          # the targetinfo they returned did not match across the required
          # threshold. Whatever the case may be, are we allowed to continue
          # to the next mapping? Let's check the terminating entry!
          if not mapping['terminating']:
            logger.debug('The mapping did not provide a threshold of matching'
                ' targetinfo, and "terminating" was set to False. Trying the'
                ' next mapping...')
continue
else:
raise tuf.exceptions.UnknownTargetError('The repositories in the'
' mapping do not agree on the target, or none of them have'
' signed for the target, and "terminating" was set to True.')
# If we are here, it means either there were no mappings, or none of the
# mappings provided the target.
logger.debug('Did not find valid targetinfo for ' + repr(target_filename))
raise tuf.exceptions.UnknownTargetError('The repositories in the map'
' file do not agree on the target, or none of them have signed'
' for the target.')
def _verify_metadata_directories(self, repositories_directory):
# Iterate 'self.repository_names_to_mirrors' and verify that the expected
# local files and directories exist. TAP 4 requires a separate local
# directory for each repository.
for repository_name in self.repository_names_to_mirrors:
logger.debug('Interrogating repository: ' + repr(repository_name))
# Each repository must cache its metadata in a separate location.
repository_directory = os.path.join(repositories_directory,
repository_name)
if not os.path.isdir(repository_directory):
raise tuf.exceptions.Error('The metadata directory'
' for ' + repr(repository_name) + ' must exist'
' at ' + repr(repository_directory))
else:
logger.debug('Found local directory for ' + repr(repository_name))
# The latest known root metadata file must also exist on disk.
root_file = os.path.join(
repository_directory, 'metadata', 'current', 'root.json')
if not os.path.isfile(root_file):
raise tuf.exceptions.Error(
'The Root file must exist at ' + repr(root_file))
else:
logger.debug('Found local Root file at ' + repr(root_file))
def _matching_targetinfo(
self, target_filename, mapping, match_custom_field=True):
valid_targetinfo = {}
# Retrieve the targetinfo from each repository using the underlying
# Updater() instance.
for repository_name in mapping['repositories']:
logger.debug('Retrieving targetinfo for ' + repr(target_filename) +
' from repository...')
try:
targetinfo, updater = self._update_from_repository(
repository_name, target_filename)
except (tuf.exceptions.UnknownTargetError, tuf.exceptions.Error):
continue
valid_targetinfo[updater] = targetinfo
matching_targetinfo = {}
logger.debug('Verifying that a threshold of targetinfo are equal...')
# Iterate 'valid_targetinfo', looking for a threshold number of matches
# for 'targetinfo'. The first targetinfo to reach the required threshold
# is returned. For example, suppose the following list of targetinfo and
# a threshold of 2:
# [A, B, C, B, A, C]
# In this case, targetinfo B is returned.
for valid_updater, compared_targetinfo in six.iteritems(valid_targetinfo):
if not self._targetinfo_match(
targetinfo, compared_targetinfo, match_custom_field):
continue
else:
matching_targetinfo[valid_updater] = targetinfo
        if len(matching_targetinfo) < mapping['threshold']:
continue
else:
logger.debug('Found a threshold of matching targetinfo!')
# We now have a targetinfo (that matches across a threshold of
# repositories as instructed by the map file), along with the
# updaters that sign for it.
logger.debug(
'Returning updaters for targetinfo: ' + repr(targetinfo))
return matching_targetinfo
return None
def _targetinfo_match(self, targetinfo1, targetinfo2, match_custom_field=True):
if match_custom_field:
return (targetinfo1 == targetinfo2)
else:
targetinfo1_without_custom = copy.deepcopy(targetinfo1)
targetinfo2_without_custom = copy.deepcopy(targetinfo2)
targetinfo1_without_custom['fileinfo'].pop('custom', None)
targetinfo2_without_custom['fileinfo'].pop('custom', None)
return (targetinfo1_without_custom == targetinfo2_without_custom)
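  # Sketch for _targetinfo_match() above: with match_custom_field=False, two
  # targetinfo dicts that differ only in fileinfo['custom'] compare equal
  # (field values below are illustrative):
  #
  #   a = {'filepath': 'f.tgz',
  #        'fileinfo': {'length': 1, 'hashes': {}, 'custom': {'x': 1}}}
  #   b = {'filepath': 'f.tgz',
  #        'fileinfo': {'length': 1, 'hashes': {}, 'custom': {'x': 2}}}
  #   self._targetinfo_match(a, b, match_custom_field=False)  # -> True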
def _target_matches_path_pattern(self, target_filename, path_patterns):
for path_pattern in path_patterns:
      logger.debug('Interrogating pattern ' + repr(path_pattern) + ' for'
          ' target: ' + repr(target_filename))
# Example: "foo.tgz" should match with "/*.tgz". Make sure to strip any
# leading path separators so that a match is made if a repo maintainer
# uses a leading separator with a delegated glob pattern, but a client
# doesn't include one when a target file is requested.
if fnmatch.fnmatch(target_filename.lstrip(os.sep), path_pattern.lstrip(os.sep)):
logger.debug('Found a match for ' + repr(target_filename))
return True
else:
logger.debug('Continue searching for relevant paths.')
continue
# If we are here, then none of the paths are relevant to the target.
logger.debug('None of the paths are relevant.')
return False
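  # Sketch of the matching rule in _target_matches_path_pattern() above:
  #
  #   fnmatch.fnmatch('foo.tgz', '*.tgz')                  # -> True
  #   fnmatch.fnmatch('foo.tgz', '/*.tgz'.lstrip(os.sep))  # -> True on POSIX
  #   fnmatch.fnmatch('foo.txt', '*.tgz')                  # -> False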
def get_updater(self, repository_name):
"""
<Purpose>
Get the updater instance corresponding to 'repository_name'.
<Arguments>
repository_name:
The name of the repository as it appears in the map file. For example,
"Django" and "PyPI" in the "repositories" entry of the map file.
"repositories": {
"Django": ["https://djangoproject.com/"],
"PyPI": ["https://pypi.python.org/"]
}
<Exceptions>
tuf.exceptions.FormatError, if any of the arguments are improperly
formatted.
<Side Effects>
None.
<Returns>
Returns the Updater() instance for 'repository_name'. If the instance
does not exist, return None.
"""
# Are the arguments properly formatted? If not, raise
# 'tuf.exceptions.FormatError'.
tuf.formats.NAME_SCHEMA.check_match(repository_name)
updater = self.repository_names_to_updaters.get(repository_name)
if not updater:
if repository_name not in self.repository_names_to_mirrors:
return None
else:
# Create repository mirrors object needed by the
# tuf.client.updater.Updater(). Each 'repository_name' can have more
# than one mirror.
mirrors = {}
for url in self.repository_names_to_mirrors[repository_name]:
mirrors[url] = {
'url_prefix': url,
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}
try:
# NOTE: State (e.g., keys) should NOT be shared across different
# updater instances.
logger.debug('Adding updater for ' + repr(repository_name))
updater = tuf.client.updater.Updater(repository_name, mirrors)
except Exception:
return None
else:
self.repository_names_to_updaters[repository_name] = updater
else:
logger.debug('Found an updater for ' + repr(repository_name))
# Ensure the updater's metadata is the latest before returning it.
updater.refresh()
return updater
def _update_from_repository(self, repository_name, target_filename):
updater = self.get_updater(repository_name)
if not updater:
raise tuf.exceptions.Error(
'Cannot load updater for ' + repr(repository_name))
else:
# Get one valid target info from the Updater object.
# 'tuf.exceptions.UnknownTargetError' raised by get_one_valid_targetinfo
# if a valid target cannot be found.
return updater.get_one_valid_targetinfo(target_filename), updater
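# A minimal usage sketch for MultiRepoUpdater (assumes 'map.json' exists,
# conforms to TAP 4, and tuf.settings.repositories_directory is set):
#
#   multi_updater = MultiRepoUpdater('map.json')
#   valid_targetinfo = multi_updater.get_valid_targetinfo('foo.tgz')
#   for updater, targetinfo in valid_targetinfo.items():
#     print(updater, targetinfo['filepath'])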
class Updater(object):
"""
<Purpose>
Provide a class that can download target files securely. The updater
keeps track of currently and previously trusted metadata, target files
available to the client, target file attributes such as file size and
hashes, key and role information, metadata signatures, and the ability
to determine when the download of a file should be permitted.
<Updater Attributes>
self.metadata:
Dictionary holding the currently and previously trusted metadata.
Example: {'current': {'root': ROOT_SCHEMA,
'targets':TARGETS_SCHEMA, ...},
'previous': {'root': ROOT_SCHEMA,
'targets':TARGETS_SCHEMA, ...}}
self.metadata_directory:
The directory where trusted metadata is stored.
self.versioninfo:
A cache of version numbers for the roles available on the repository.
Example: {'targets.json': {'version': 128}, ...}
self.mirrors:
The repository mirrors from which metadata and targets are available.
Conformant to 'tuf.formats.MIRRORDICT_SCHEMA'.
self.repository_name:
The name of the updater instance.
<Updater Methods>
refresh():
This method downloads, verifies, and loads metadata for the top-level
roles in a specific order (i.e., timestamp -> snapshot -> root -> targets)
The expiration time for downloaded metadata is also verified.
The metadata for delegated roles are not refreshed by this method, but by
the method that returns targetinfo (i.e., get_one_valid_targetinfo()).
The refresh() method should be called by the client before any target
requests.
get_one_valid_targetinfo(file_path):
Returns the target information for a specific file identified by its file
path. This target method also downloads the metadata of updated targets.
updated_targets(targets, destination_directory):
After the client has retrieved the target information for those targets
they are interested in updating, they would call this method to determine
which targets have changed from those saved locally on disk. All the
    targets that have changed are returned in a list. From this list, they
can request a download by calling 'download_target()'.
download_target(target, destination_directory):
This method performs the actual download of the specified target. The
file is saved to the 'destination_directory' argument.
remove_obsolete_targets(destination_directory):
Any files located in 'destination_directory' that were previously
served by the repository but have since been removed, can be deleted
from disk by the client by calling this method.
Note: The methods listed above are public and intended for the software
updater integrating TUF with this module. All other methods that may begin
with a single leading underscore are non-public and only used internally.
updater.py is not subclassed in TUF, nor is it designed to be subclassed,
so double leading underscores is not used.
http://www.python.org/dev/peps/pep-0008/#method-names-and-instance-variables
"""
def __init__(self, repository_name, repository_mirrors):
"""
<Purpose>
Constructor. Instantiating an updater object causes all the metadata
files for the top-level roles to be read from disk, including the key and
role information for the delegated targets of 'targets'. The actual
metadata for delegated roles is not loaded in __init__. The metadata for
these delegated roles, including nested delegated roles, are loaded,
updated, and saved to the 'self.metadata' store, as needed, by
get_one_valid_targetinfo().
The initial set of metadata files are provided by the software update
system utilizing TUF.
In order to use an updater, the following directories must already
exist locally:
{tuf.settings.repositories_directory}/{repository_name}/metadata/current
{tuf.settings.repositories_directory}/{repository_name}/metadata/previous
and, at a minimum, the root metadata file must exist:
{tuf.settings.repositories_directory}/{repository_name}/metadata/current/root.json
<Arguments>
repository_name:
The name of the repository.
repository_mirrors:
A dictionary holding repository mirror information, conformant to
'tuf.formats.MIRRORDICT_SCHEMA'. This dictionary holds
information such as the directory containing the metadata and target
files, the server's URL prefix, and the target content directories the
client should be confined to.
repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}}
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
tuf.exceptions.RepositoryError:
If there is an error with the updater's repository files, such
as a missing 'root.json' file.
<Side Effects>
    The metadata files (e.g., 'root.json', 'targets.json') for the top-level
roles are read from disk and stored in dictionaries. In addition, the
key and roledb modules are populated with 'repository_name' entries.
<Returns>
None.
"""
# Do the arguments have the correct format?
# These checks ensure the arguments have the appropriate
# number of objects and object types and that all dict
# keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)
# Save the validated arguments.
self.repository_name = repository_name
self.mirrors = repository_mirrors
# Store the trusted metadata read from disk.
self.metadata = {}
# Store the currently trusted/verified metadata.
self.metadata['current'] = {}
# Store the previously trusted/verified metadata.
self.metadata['previous'] = {}
# Store the version numbers of roles available on the repository. The dict
# keys are paths, and the dict values versioninfo data. This information
# can help determine whether a metadata file has changed and needs to be
# re-downloaded.
self.versioninfo = {}
# Store the file information of the root and snapshot roles. The dict keys
# are paths, the dict values fileinfo data. This information can help
# determine whether a metadata file has changed and so needs to be
# re-downloaded.
self.fileinfo = {}
# Store the location of the client's metadata directory.
self.metadata_directory = {}
# Store the 'consistent_snapshot' of the Root role. This setting
# determines if metadata and target files downloaded from remote
# repositories include the digest.
self.consistent_snapshot = False
# Ensure the repository metadata directory has been set.
if tuf.settings.repositories_directory is None:
raise tuf.exceptions.RepositoryError('The TUF update client'
' module must specify the directory containing the local repository'
' files. "tuf.settings.repositories_directory" MUST be set.')
# Set the path for the current set of metadata files.
repositories_directory = tuf.settings.repositories_directory
repository_directory = os.path.join(repositories_directory, self.repository_name)
current_path = os.path.join(repository_directory, 'metadata', 'current')
# Ensure the current path is valid/exists before saving it.
if not os.path.exists(current_path):
raise tuf.exceptions.RepositoryError('Missing'
' ' + repr(current_path) + '. This path must exist and, at a minimum,'
' contain the Root metadata file.')
self.metadata_directory['current'] = current_path
# Set the path for the previous set of metadata files.
previous_path = os.path.join(repository_directory, 'metadata', 'previous')
# Ensure the previous path is valid/exists.
if not os.path.exists(previous_path):
raise tuf.exceptions.RepositoryError('Missing ' + repr(previous_path) + '.'
' This path MUST exist.')
self.metadata_directory['previous'] = previous_path
# Load current and previous metadata.
for metadata_set in ['current', 'previous']:
for metadata_role in ['root', 'targets', 'snapshot', 'timestamp']:
self._load_metadata_from_file(metadata_set, metadata_role)
# Raise an exception if the repository is missing the required 'root'
# metadata.
if 'root' not in self.metadata['current']:
raise tuf.exceptions.RepositoryError('No root of trust!'
' Could not find the "root.json" file.')
def __str__(self):
"""
The string representation of an Updater object.
"""
return self.repository_name
def _load_metadata_from_file(self, metadata_set, metadata_role):
"""
<Purpose>
Non-public method that loads current or previous metadata if there is a
local file. If the expected file belonging to 'metadata_role' (e.g.,
'root.json') cannot be loaded, raise an exception. The extracted metadata
object loaded from file is saved to the metadata store (i.e.,
self.metadata).
<Arguments>
metadata_set:
The string 'current' or 'previous', depending on whether one wants to
load the currently or previously trusted metadata file.
metadata_role:
The name of the metadata. This is a role name and should
not end in '.json'. Examples: 'root', 'targets', 'unclaimed'.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the role object loaded for 'metadata_role' is improperly formatted.
securesystemslib.exceptions.Error:
If there was an error importing a delegated role of 'metadata_role'
or the 'metadata_set' is not one currently supported.
<Side Effects>
If the metadata is loaded successfully, it is saved to the metadata
store. If 'metadata_role' is 'root', the role and key databases
are reloaded. If 'metadata_role' is a target metadata, all its
delegated roles are refreshed.
<Returns>
None.
"""
# Ensure we have a valid metadata set.
if metadata_set not in ['current', 'previous']:
raise securesystemslib.exceptions.Error(
'Invalid metadata set: ' + repr(metadata_set))
# Save and construct the full metadata path.
metadata_directory = self.metadata_directory[metadata_set]
metadata_filename = metadata_role + '.json'
metadata_filepath = os.path.join(metadata_directory, metadata_filename)
# Ensure the metadata path is valid/exists, else ignore the call.
if os.path.exists(metadata_filepath):
# Load the file. The loaded object should conform to
# 'tuf.formats.SIGNABLE_SCHEMA'.
try:
metadata_signable = securesystemslib.util.load_json_file(
metadata_filepath)
# Although the metadata file may exist locally, it may not
# be a valid json file. On the next refresh cycle, it will be
      # updated as required. If Root cannot be loaded from disk
# successfully, an exception should be raised by the caller.
except securesystemslib.exceptions.Error:
return
tuf.formats.check_signable_object_format(metadata_signable)
# Extract the 'signed' role object from 'metadata_signable'.
metadata_object = metadata_signable['signed']
# Save the metadata object to the metadata store.
self.metadata[metadata_set][metadata_role] = metadata_object
# If 'metadata_role' is 'root' or targets metadata, the key and role
      # databases must be rebuilt. If 'root', ensure self.consistent_snapshot
# is updated.
if metadata_set == 'current':
if metadata_role == 'root':
self._rebuild_key_and_role_db()
self.consistent_snapshot = metadata_object['consistent_snapshot']
elif metadata_object['_type'] == 'targets':
# TODO: Should we also remove the keys of the delegated roles?
self._import_delegations(metadata_role)
def _rebuild_key_and_role_db(self):
"""
<Purpose>
Non-public method that rebuilds the key and role databases from the
currently trusted 'root' metadata object extracted from 'root.json'.
This private method is called when a new/updated 'root' metadata file is
loaded or when updater.refresh() is called. This method will only store
the role information of the top-level roles (i.e., 'root', 'targets',
'snapshot', 'timestamp').
<Arguments>
None.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the 'root' metadata is improperly formatted.
securesystemslib.exceptions.Error:
If there is an error loading a role contained in the 'root'
metadata.
<Side Effects>
The key and role databases are reloaded for the top-level roles.
<Returns>
None.
"""
# Clobbering this means all delegated metadata files are rendered outdated
# and will need to be reloaded. However, reloading the delegated metadata
# files is avoided here because fetching target information with
# get_one_valid_targetinfo() always causes a refresh of these files. The
# metadata files for delegated roles are also not loaded when the
# repository is first instantiated. Due to this setup, reloading delegated
# roles is not required here.
tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'],
self.repository_name)
tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'],
self.repository_name)
def _import_delegations(self, parent_role):
"""
<Purpose>
Non-public method that imports all the roles delegated by 'parent_role'.
<Arguments>
parent_role:
The role whose delegations will be imported.
<Exceptions>
securesystemslib.exceptions.FormatError:
If a key attribute of a delegated role's signing key is
improperly formatted.
securesystemslib.exceptions.Error:
If the signing key of a delegated role cannot not be loaded.
<Side Effects>
The key and role databases are modified to include the newly loaded roles
delegated by 'parent_role'.
<Returns>
None.
"""
current_parent_metadata = self.metadata['current'][parent_role]
if 'delegations' not in current_parent_metadata:
return
# This could be quite slow with a large number of delegations.
keys_info = current_parent_metadata['delegations'].get('keys', {})
roles_info = current_parent_metadata['delegations'].get('roles', [])
logger.debug('Adding roles delegated from ' + repr(parent_role) + '.')
# Iterate the keys of the delegated roles of 'parent_role' and load them.
for keyid, keyinfo in six.iteritems(keys_info):
if keyinfo['keytype'] in ['rsa', 'ed25519', 'ecdsa-sha2-nistp256']:
# We specify the keyid to ensure that it's the correct keyid
# for the key.
try:
# The repo may have used hashing algorithms for the generated keyids
          # that don't match the client's set of hash algorithms. Make sure
          # to use only the repo's selected hashing algorithms.
hash_algorithms = securesystemslib.settings.HASH_ALGORITHMS
securesystemslib.settings.HASH_ALGORITHMS = keyinfo['keyid_hash_algorithms']
key, keyids = securesystemslib.keys.format_metadata_to_key(keyinfo)
securesystemslib.settings.HASH_ALGORITHMS = hash_algorithms
for key_id in keyids:
key['keyid'] = key_id
tuf.keydb.add_key(key, keyid=None, repository_name=self.repository_name)
except tuf.exceptions.KeyAlreadyExistsError:
pass
except (securesystemslib.exceptions.FormatError, securesystemslib.exceptions.Error):
logger.exception('Invalid key for keyid: ' + repr(keyid) + '.')
logger.error('Aborting role delegation for parent role ' + parent_role + '.')
raise
else:
logger.warning('Invalid key type for ' + repr(keyid) + '.')
continue
# Add the roles to the role database.
for roleinfo in roles_info:
try:
# NOTE: tuf.roledb.add_role will take care of the case where rolename
# is None.
rolename = roleinfo.get('name')
logger.debug('Adding delegated role: ' + str(rolename) + '.')
tuf.roledb.add_role(rolename, roleinfo, self.repository_name)
except tuf.exceptions.RoleAlreadyExistsError:
logger.warning('Role already exists: ' + rolename)
except Exception:
logger.exception('Failed to add delegated role: ' + repr(rolename) + '.')
raise
def refresh(self, unsafely_update_root_if_necessary=True):
"""
<Purpose>
Update the latest copies of the metadata for the top-level roles. The
update request process follows a specific order to ensure the metadata
files are securely updated:
timestamp -> snapshot -> root (if necessary) -> targets.
Delegated metadata is not refreshed by this method. After this method is
called, the use of get_one_valid_targetinfo() will update delegated
metadata, when required. Calling refresh() ensures that top-level
metadata is up-to-date, so that the target methods can refer to the
latest available content. Thus, refresh() should always be called by the
client before any requests of target file information.
The expiration time for downloaded metadata is also verified, including
local metadata that the repository claims is up to date.
If the refresh fails for any reason, then unless
'unsafely_update_root_if_necessary' is set, refresh will be retried once
after first attempting to update the root metadata file. Only after this
check will the exceptions listed here potentially be raised.
<Arguments>
unsafely_update_root_if_necessary:
Boolean that indicates whether to unsafely update the Root metadata if
any of the top-level metadata cannot be downloaded successfully. The
Root role is unsafely updated if its current version number is unknown.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
If the metadata for any of the top-level roles cannot be updated.
tuf.exceptions.ExpiredMetadataError:
If any of the top-level metadata is expired (whether a new version was
downloaded expired or no new version was found and the existing
version is now expired).
<Side Effects>
Updates the metadata files of the top-level roles with the latest
information.
<Returns>
None.
"""
# Do the arguments have the correct format?
# This check ensures the arguments have the appropriate
# number of objects and object types, and that all dict
# keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.BOOLEAN_SCHEMA.check_match(
unsafely_update_root_if_necessary)
# Update the top-level metadata. The _update_metadata_if_changed() and
# _update_metadata() calls below do NOT perform an update if there
# is insufficient trusted signatures for the specified metadata.
# Raise 'tuf.exceptions.NoWorkingMirrorError' if an update fails.
root_metadata = self.metadata['current']['root']
try:
self._ensure_not_expired(root_metadata, 'root')
except tuf.exceptions.ExpiredMetadataError:
# Raise 'tuf.exceptions.NoWorkingMirrorError' if a valid (not
# expired, properly signed, and valid metadata) 'root.json' cannot be
# installed.
if unsafely_update_root_if_necessary:
        logger.info('Expired Root metadata was loaded from disk.'
            ' Trying to update it now.')
# The caller explicitly requested not to unsafely fetch an expired Root.
else:
logger.info('An expired Root metadata was loaded and must be updated.')
raise
# TODO: How should the latest root metadata be verified? According to the
# currently trusted root keys? What if all of the currently trusted
# root keys have since been revoked by the latest metadata? Alternatively,
# do we blindly trust the downloaded root metadata here?
self._update_root_metadata(root_metadata)
# Ensure that the role and key information of the top-level roles is the
# latest. We do this whether or not Root needed to be updated, in order to
# ensure that, e.g., the entries in roledb for top-level roles are
# populated with expected keyid info so that roles can be validated. In
# certain circumstances, top-level metadata might be missing because it was
# marked obsolete and deleted after a failed attempt, and thus we should
# refresh them here as a protective measure. See Issue #736.
self._rebuild_key_and_role_db()
self.consistent_snapshot = \
self.metadata['current']['root']['consistent_snapshot']
# Use default but sane information for timestamp metadata, and do not
# require strict checks on its required length.
self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH)
# TODO: After fetching snapshot.json, we should either verify the root
# fileinfo referenced there matches what was fetched earlier in
# _update_root_metadata() or make another attempt to download root.json.
self._update_metadata_if_changed('snapshot',
referenced_metadata='timestamp')
self._update_metadata_if_changed('targets')
def _update_root_metadata(self, current_root_metadata):
"""
<Purpose>
The root file must be signed by the current root threshold and keys as
well as the previous root threshold and keys. The update process for root
files means that each intermediate root file must be downloaded, to build
a chain of trusted root keys from keys already trusted by the client:
1.root -> 2.root -> 3.root
3.root must be signed by the threshold and keys of 2.root, and 2.root
must be signed by the threshold and keys of 1.root.
<Arguments>
current_root_metadata:
The currently held version of root.
<Side Effects>
Updates the root metadata files with the latest information.
<Returns>
None.
"""
# Retrieve the latest, remote root.json.
latest_root_metadata_file = self._get_metadata_file(
'root', 'root.json', DEFAULT_ROOT_UPPERLENGTH, None)
latest_root_metadata = securesystemslib.util.load_json_string(
latest_root_metadata_file.read().decode('utf-8'))
next_version = current_root_metadata['version'] + 1
latest_version = latest_root_metadata['signed']['version']
# update from the next version of root up to (and including) the latest
# version. For example:
# current = version 1
# latest = version 3
# update from 1.root.json to 3.root.json.
for version in range(next_version, latest_version + 1):
# Temporarily set consistent snapshot. Will be updated to whatever is set
# in the latest root.json after running through the intermediates with
# _update_metadata().
self.consistent_snapshot = True
self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH, version=version)
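  # Worked example for the loop above: if the trusted root is version 2 and
  # the remote repository serves version 5, the client downloads and verifies
  # 3.root.json, 4.root.json, and 5.root.json in order, each one signed by the
  # threshold and keys of its predecessor.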
def _check_hashes(self, file_object, trusted_hashes):
"""
<Purpose>
Non-public method that verifies multiple secure hashes of the downloaded
file 'file_object'. If any of these fail it raises an exception. This is
to conform with the TUF spec, which support clients with different hashing
algorithms. The 'hash.py' module is used to compute the hashes of
'file_object'.
<Arguments>
file_object:
A 'securesystemslib.util.TempFile' file-like object. 'file_object'
ensures that a read() without a size argument properly reads the entire
file.
trusted_hashes:
A dictionary with hash-algorithm names as keys and hashes as dict values.
        The hashes should be in hexdigest format and conformant to
'securesystemslib.formats.HASHDICT_SCHEMA'.
<Exceptions>
securesystemslib.exceptions.BadHashError, if the hashes don't match.
<Side Effects>
Hash digest object is created using the 'securesystemslib.hash' module.
<Returns>
None.
"""
# Verify each trusted hash of 'trusted_hashes'. If all are valid, simply
# return.
for algorithm, trusted_hash in six.iteritems(trusted_hashes):
digest_object = securesystemslib.hash.digest(algorithm)
digest_object.update(file_object.read())
computed_hash = digest_object.hexdigest()
# Raise an exception if any of the hashes are incorrect.
if trusted_hash != computed_hash:
raise securesystemslib.exceptions.BadHashError(trusted_hash,
computed_hash)
else:
logger.info('The file\'s ' + algorithm + ' hash is'
' correct: ' + trusted_hash)
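  # Sketch of the check above for a single algorithm (the digest value is a
  # placeholder):
  #
  #   digest_object = securesystemslib.hash.digest('sha256')
  #   digest_object.update(file_object.read())
  #   digest_object.hexdigest() == trusted_hashes['sha256']  # must be True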
def _hard_check_file_length(self, file_object, trusted_file_length):
"""
<Purpose>
Non-public method that ensures the length of 'file_object' is strictly
equal to 'trusted_file_length'. This is a deliberately redundant
implementation designed to complement
tuf.download._check_downloaded_length().
<Arguments>
file_object:
A 'securesystemslib.util.TempFile' file-like object. 'file_object'
ensures that a read() without a size argument properly reads the entire
file.
trusted_file_length:
A non-negative integer that is the trusted length of the file.
<Exceptions>
tuf.exceptions.DownloadLengthMismatchError, if the lengths do not match.
<Side Effects>
Reads the contents of 'file_object' and logs a message if 'file_object'
matches the trusted length.
<Returns>
None.
"""
# Read the entire contents of 'file_object', a
# 'securesystemslib.util.TempFile' file-like object that ensures the entire
# file is read.
observed_length = len(file_object.read())
# Return and log a message if the length 'file_object' is equal to
# 'trusted_file_length', otherwise raise an exception. A hard check
# ensures that a downloaded file strictly matches a known, or trusted,
# file length.
if observed_length != trusted_file_length:
raise tuf.exceptions.DownloadLengthMismatchError(trusted_file_length,
observed_length)
else:
logger.debug('Observed length (' + str(observed_length) +\
') == trusted length (' + str(trusted_file_length) + ')')
def _soft_check_file_length(self, file_object, trusted_file_length):
"""
<Purpose>
Non-public method that checks the trusted file length of a
'securesystemslib.util.TempFile' file-like object. The length of the file
must be less than or equal to the expected length. This is a deliberately
redundant implementation designed to complement
tuf.download._check_downloaded_length().
<Arguments>
file_object:
A 'securesystemslib.util.TempFile' file-like object. 'file_object'
ensures that a read() without a size argument properly reads the entire
file.
trusted_file_length:
A non-negative integer that is the trusted length of the file.
<Exceptions>
tuf.exceptions.DownloadLengthMismatchError, if the lengths do
not match.
<Side Effects>
Reads the contents of 'file_object' and logs a message if 'file_object'
is less than or equal to the trusted length.
<Returns>
None.
"""
# Read the entire contents of 'file_object', a
# 'securesystemslib.util.TempFile' file-like object that ensures the entire
# file is read.
observed_length = len(file_object.read())
# Return and log a message if 'file_object' is less than or equal to
# 'trusted_file_length', otherwise raise an exception. A soft check
# ensures that an upper bound restricts how large a file is downloaded.
if observed_length > trusted_file_length:
raise tuf.exceptions.DownloadLengthMismatchError(trusted_file_length,
observed_length)
else:
logger.debug('Observed length (' + str(observed_length) +\
') <= trusted length (' + str(trusted_file_length) + ')')
def _get_target_file(self, target_filepath, file_length, file_hashes):
"""
<Purpose>
      Non-public method that safely downloads a target file up to a certain
      length (i.e., its length and hashes must strictly equal the trusted
      values), and checks its hashes thereafter.
<Arguments>
target_filepath:
The target filepath (relative to the repository targets directory)
obtained from TUF targets metadata.
file_length:
The expected compressed length of the target file. If the file is not
compressed, then it will simply be its uncompressed length.
file_hashes:
The expected hashes of the target file.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The target could not be fetched. This is raised only when all known
mirrors failed to provide a valid copy of the desired target file.
<Side Effects>
The target file is downloaded from all known repository mirrors in the
worst case. If a valid copy of the target file is found, it is stored in
a temporary file and returned.
<Returns>
A 'securesystemslib.util.TempFile' file-like object containing the target.
"""
# Define a callable function that is passed as an argument to _get_file()
# and called. The 'verify_target_file' function ensures the file length
# and hashes of 'target_filepath' are strictly equal to the trusted values.
def verify_target_file(target_file_object):
# Every target file must have its length and hashes inspected.
self._hard_check_file_length(target_file_object, file_length)
self._check_hashes(target_file_object, file_hashes)
if self.consistent_snapshot:
# Note: values() does not return a list in Python 3. Use list()
# on values() for Python 2+3 compatibility.
target_digest = list(file_hashes.values()).pop()
dirname, basename = os.path.split(target_filepath)
target_filepath = os.path.join(dirname, target_digest + '.' + basename)
return self._get_file(target_filepath, verify_target_file,
'target', file_length, download_safely=True)
def _verify_uncompressed_metadata_file(self, metadata_file_object,
metadata_role):
"""
<Purpose>
Non-public method that verifies an uncompressed metadata file. An
      exception is raised if 'metadata_file_object' is invalid. There is no
return value.
<Arguments>
metadata_file_object:
A 'securesystemslib.util.TempFile' instance containing the metadata
file. 'metadata_file_object' ensures the entire file is returned with
read().
metadata_role:
The role name of the metadata (e.g., 'root', 'targets',
'unclaimed').
<Exceptions>
securesystemslib.exceptions.FormatError:
In case the metadata file is valid JSON, but not valid TUF metadata.
tuf.exceptions.InvalidMetadataJSONError:
In case the metadata file is not valid JSON.
tuf.exceptions.ReplayedMetadataError:
In case the downloaded metadata file is older than the current one.
tuf.exceptions.RepositoryError:
In case the repository is somehow inconsistent; e.g. a parent has not
delegated to a child (contrary to expectations).
      securesystemslib.exceptions.BadSignatureError:
In case the metadata file does not have a valid signature.
<Side Effects>
The content of 'metadata_file_object' is read and loaded.
<Returns>
None.
"""
metadata = metadata_file_object.read().decode('utf-8')
try:
metadata_signable = securesystemslib.util.load_json_string(metadata)
except Exception as exception:
raise tuf.exceptions.InvalidMetadataJSONError(exception)
else:
# Ensure the loaded 'metadata_signable' is properly formatted. Raise
# 'securesystemslib.exceptions.FormatError' if not.
tuf.formats.check_signable_object_format(metadata_signable)
# Is 'metadata_signable' expired?
self._ensure_not_expired(metadata_signable['signed'], metadata_role)
# We previously verified version numbers in this function, but have since
# moved version number verification to the functions that retrieve
# metadata.
# Verify the signature on the downloaded metadata object.
valid = tuf.sig.verify(metadata_signable, metadata_role,
self.repository_name)
if not valid:
raise securesystemslib.exceptions.BadSignatureError(metadata_role)
def _get_metadata_file(self, metadata_role, remote_filename,
upperbound_filelength, expected_version):
"""
<Purpose>
Non-public method that tries downloading, up to a certain length, a
metadata file from a list of known mirrors. As soon as the first valid
copy of the file is found, the downloaded file is returned and the
remaining mirrors are skipped.
<Arguments>
metadata_role:
The role name of the metadata (e.g., 'root', 'targets', 'unclaimed').
remote_filename:
        The relative file path (on the remote repository) of 'metadata_role'.
upperbound_filelength:
The expected length, or upper bound, of the metadata file to be
downloaded.
expected_version:
The expected and required version number of the 'metadata_role' file
downloaded. 'expected_version' is an integer.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The metadata could not be fetched. This is raised only when all known
mirrors failed to provide a valid copy of the desired metadata file.
<Side Effects>
The file is downloaded from all known repository mirrors in the worst
case. If a valid copy of the file is found, it is stored in a temporary
file and returned.
<Returns>
A 'securesystemslib.util.TempFile' file-like object containing the
metadata.
"""
file_mirrors = tuf.mirrors.get_list_of_mirrors('meta', remote_filename,
self.mirrors)
# file_mirror (URL): error (Exception)
file_mirror_errors = {}
file_object = None
for file_mirror in file_mirrors:
try:
file_object = tuf.download.unsafe_download(file_mirror,
upperbound_filelength)
        # Verify 'file_object': check the metadata's spec version and version
        # number here, then verify its format, expiration, and signatures via
        # _verify_uncompressed_metadata_file() below.
metadata_signable = \
securesystemslib.util.load_json_string(file_object.read().decode('utf-8'))
# Determine if the specification version number is supported. It is
# assumed that "spec_version" is in (major.minor.fix) format, (for
# example: "1.4.3") and that releases with the same major version
# number maintain backwards compatibility. Consequently, if the major
# version number of new metadata equals our expected major version
# number, the new metadata is safe to parse.
try:
metadata_spec_version = metadata_signable['signed']['spec_version']
metadata_spec_version_split = metadata_spec_version.split('.')
metadata_spec_major_version = int(metadata_spec_version_split[0])
metadata_spec_minor_version = int(metadata_spec_version_split[1])
code_spec_version_split = tuf.SPECIFICATION_VERSION.split('.')
code_spec_major_version = int(code_spec_version_split[0])
code_spec_minor_version = int(code_spec_version_split[1])
if metadata_spec_major_version != code_spec_major_version:
raise tuf.exceptions.UnsupportedSpecificationError(
'Downloaded metadata that specifies an unsupported '
'spec_version. This code supports major version number: ' +
repr(code_spec_major_version) + '; however, the obtained '
'metadata lists version number: ' + str(metadata_spec_version))
          # Report to the user if minor versions do not match; continue with
          # the update.
if metadata_spec_minor_version != code_spec_minor_version:
logger.info("Downloaded metadata that specifies a different minor " +
"spec_version. This code has version " +
str(tuf.SPECIFICATION_VERSION) +
" and the metadata lists version number " +
str(metadata_spec_version) +
". The update will continue as the major versions match.")
except (ValueError, TypeError):
raise securesystemslib.exceptions.FormatError('Improperly'
' formatted spec_version, which must be in major.minor.fix format')
# Verify the downloaded version number. If the caller specified an
# expected version, the downloaded version must match it exactly;
# otherwise, it must be greater than the currently trusted version
# number for 'metadata_role'.
version_downloaded = metadata_signable['signed']['version']
if expected_version is not None:
# Verify that the downloaded version matches the version expected by
# the caller.
if version_downloaded != expected_version:
raise tuf.exceptions.BadVersionNumberError('Downloaded'
' version number: ' + repr(version_downloaded) + '. Version'
' number MUST be: ' + repr(expected_version))
# The caller does not know which version to download. Verify that the
# downloaded version is at least greater than the one locally
# available.
else:
# Verify that the version number of the locally stored
# 'timestamp.json', if available, is less than what was downloaded.
# Otherwise, accept the new timestamp with version number
# 'version_downloaded'.
try:
current_version = \
self.metadata['current'][metadata_role]['version']
if version_downloaded < current_version:
raise tuf.exceptions.ReplayedMetadataError(metadata_role,
version_downloaded, current_version)
except KeyError:
logger.info(metadata_role + ' not available locally.')
self._verify_uncompressed_metadata_file(file_object, metadata_role)
except Exception as exception:
# Remember the error from this mirror, and "reset" the target file.
logger.exception('Update failed from ' + file_mirror + '.')
file_mirror_errors[file_mirror] = exception
file_object = None
else:
break
if file_object:
return file_object
else:
logger.error('Failed to update ' + repr(remote_filename) + ' from all'
' mirrors: ' + repr(file_mirror_errors))
raise tuf.exceptions.NoWorkingMirrorError(file_mirror_errors)
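# A minimal, standalone sketch of the spec_version rule enforced above,
# assuming "major.minor.fix" version strings. The helper below is
# illustrative only (not part of the updater API): metadata with an equal
# major version is compatible, and a differing minor version merely
# warrants a log message.
def _spec_version_is_compatible(metadata_spec_version, code_spec_version):
  return (int(metadata_spec_version.split('.')[0])
      == int(code_spec_version.split('.')[0]))

# Example: "1.4.3" metadata parses under code supporting spec "1.0.0",
# while "2.0.0" metadata triggers UnsupportedSpecificationError above.
assert _spec_version_is_compatible('1.4.3', '1.0.0')
assert not _spec_version_is_compatible('2.0.0', '1.0.0')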
def _verify_root_chain_link(self, rolename, current_root_metadata,
next_root_metadata):
if rolename != 'root':
return True
current_root_role = current_root_metadata['roles'][rolename]
# Verify next metadata with current keys/threshold
valid = tuf.sig.verify(next_root_metadata, rolename, self.repository_name,
current_root_role['threshold'], current_root_role['keyids'])
if not valid:
raise securesystemslib.exceptions.BadSignatureError('Root is not signed'
' by previous threshold of keys.')
def _get_file(self, filepath, verify_file_function, file_type, file_length,
download_safely=True):
"""
<Purpose>
Non-public method that tries downloading, up to a certain length, a
metadata or target file from a list of known mirrors. As soon as the first
valid copy of the file is found, the rest of the mirrors will be skipped.
<Arguments>
filepath:
The relative metadata or target filepath.
verify_file_function:
A callable function that expects a 'securesystemslib.util.TempFile'
file-like object and raises an exception if the file is invalid.
Target files and uncompressed versions of metadata may be verified with
'verify_file_function'.
file_type:
Type of data needed for download, must correspond to one of the strings
in the list ['meta', 'target']. 'meta' for metadata file type or
'target' for target file type. It should correspond to the
'securesystemslib.formats.NAME_SCHEMA' format.
file_length:
The expected length, or upper bound, of the target or metadata file to
be downloaded.
download_safely:
A boolean switch to toggle safe or unsafe download of the file.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The metadata could not be fetched. This is raised only when all known
mirrors failed to provide a valid copy of the desired metadata file.
<Side Effects>
The file is downloaded from all known repository mirrors in the worst
case. If a valid copy of the file is found, it is stored in a temporary
file and returned.
<Returns>
A 'securesystemslib.util.TempFile' file-like object containing the
metadata or target.
"""
file_mirrors = tuf.mirrors.get_list_of_mirrors(file_type, filepath,
self.mirrors)
# file_mirror (URL): error (Exception)
file_mirror_errors = {}
file_object = None
for file_mirror in file_mirrors:
try:
# TODO: Instead of the more fragile 'download_safely' switch, unroll
# the function into two separate ones: one for "safe" download, and the
# other one for "unsafe" download? This should induce safer and more
# readable code.
if download_safely:
file_object = tuf.download.safe_download(file_mirror, file_length)
else:
file_object = tuf.download.unsafe_download(file_mirror, file_length)
# Verify 'file_object' according to the caller-supplied verification
# function.
verify_file_function(file_object)
except Exception as exception:
# Remember the error from this mirror, and "reset" the target file.
logger.exception('Update failed from ' + file_mirror + '.')
file_mirror_errors[file_mirror] = exception
file_object = None
else:
break
if file_object:
return file_object
else:
logger.error('Failed to update ' + repr(filepath) + ' from'
' all mirrors: ' + repr(file_mirror_errors))
raise tuf.exceptions.NoWorkingMirrorError(file_mirror_errors)
def _update_metadata(self, metadata_role, upperbound_filelength, version=None):
"""
<Purpose>
Non-public method that downloads, verifies, and 'installs' the metadata
belonging to 'metadata_role'. Calling this method implies that the
'metadata_role' on the repository is newer than the client's, and thus
needs to be re-downloaded. The current and previous metadata stores are
updated if the newly downloaded metadata is successfully downloaded and
verified. This method also assumes that the store of top-level metadata
is the latest and exists.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
upperbound_filelength:
The expected length, or upper bound, of the metadata file to be
downloaded.
version:
The expected and required version number of the 'metadata_role' file
downloaded. 'version' is an integer.
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
The metadata cannot be updated. This is not specific to a single
failure but rather indicates that all possible ways to update the
metadata have been tried and failed.
<Side Effects>
The metadata file belonging to 'metadata_role' is downloaded from a
repository mirror. If the metadata is valid, it is stored in the
metadata store.
<Returns>
None.
"""
# Construct the metadata filename as expected by the download/mirror
# modules.
metadata_filename = metadata_role + '.json'
# Attempt a file download from each mirror until the file is downloaded and
# verified. If the signature of the downloaded file is valid, proceed,
# otherwise log a warning and try the next mirror. 'metadata_file_object'
# is the file-like object returned by 'download.py'. 'metadata_signable'
# is the object extracted from 'metadata_file_object'. Metadata saved to
# files are regarded as 'signable' objects, conformant to
# 'tuf.formats.SIGNABLE_SCHEMA'.
#
# Some metadata (presently timestamp) will be downloaded "unsafely", in the
# sense that we can only estimate its true length and know nothing about
# its version. This is because not all metadata will have other metadata
# for it; otherwise we would have an infinite regress of metadata signing
# for each other. In this case, we will download the metadata up to the
# best length we can get for it, not request a specific version, but
# perform the rest of the checks (e.g., signature verification).
remote_filename = metadata_filename
filename_version = ''
if self.consistent_snapshot and version:
filename_version = version
dirname, basename = os.path.split(remote_filename)
remote_filename = os.path.join(
dirname, str(filename_version) + '.' + basename)
metadata_file_object = \
self._get_metadata_file(metadata_role, remote_filename,
upperbound_filelength, version)
# The metadata has been verified. Move the metadata file into place.
# First, move the 'current' metadata file to the 'previous' directory
# if it exists.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
current_filepath = os.path.abspath(current_filepath)
securesystemslib.util.ensure_parent_dir(current_filepath)
previous_filepath = os.path.join(self.metadata_directory['previous'],
metadata_filename)
previous_filepath = os.path.abspath(previous_filepath)
if os.path.exists(current_filepath):
# Previous metadata might not exist, say when delegations are added.
securesystemslib.util.ensure_parent_dir(previous_filepath)
shutil.move(current_filepath, previous_filepath)
# Next, move the verified updated metadata file to the 'current' directory.
# Note that the 'move' method comes from securesystemslib.util's TempFile class.
# 'metadata_file_object' is an instance of securesystemslib.util.TempFile.
metadata_signable = \
securesystemslib.util.load_json_string(metadata_file_object.read().decode('utf-8'))
metadata_file_object.move(current_filepath)
# Extract the metadata object so we can store it to the metadata store.
# 'current_metadata_object' is set to 'None' if there is no object
# stored for 'metadata_role'.
updated_metadata_object = metadata_signable['signed']
current_metadata_object = self.metadata['current'].get(metadata_role)
self._verify_root_chain_link(metadata_role, current_metadata_object,
metadata_signable)
# Finally, update the metadata and fileinfo stores, and rebuild the
# key and role info for the top-level roles if 'metadata_role' is root.
# Rebuilding the key and role info is required if the newly-installed
# root metadata has revoked keys or updated any top-level role information.
logger.debug('Updated ' + repr(current_filepath) + '.')
self.metadata['previous'][metadata_role] = current_metadata_object
self.metadata['current'][metadata_role] = updated_metadata_object
self._update_versioninfo(metadata_filename)
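# Runnable sketch of the consistent-snapshot filename scheme used above:
# when consistent snapshots are enabled and a version is known, the
# version number is prepended to the basename of the remote filename.
# The helper name is illustrative only; 'os' is imported by this module.
def _versioned_filename(remote_filename, version):
  dirname, basename = os.path.split(remote_filename)
  return os.path.join(dirname, str(version) + '.' + basename)

assert _versioned_filename('root.json', 42) == '42.root.json'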
def _update_metadata_if_changed(self, metadata_role,
referenced_metadata='snapshot'):
"""
<Purpose>
Non-public method that updates the metadata for 'metadata_role' if it has
changed. With the exception of the 'timestamp' role, all the top-level
roles are updated by this method. The 'timestamp' role is always
downloaded from a mirror without first checking if it has been updated;
it is updated in refresh() by calling _update_metadata('timestamp').
This method is also called for delegated role metadata, which are
referenced by 'snapshot'.
If the metadata needs to be updated but an update cannot be obtained,
this method will delete the file (with the exception of the root
metadata, which never gets removed without a replacement).
Due to the way in which metadata files are updated, it is expected that
'referenced_metadata' is not out of date and trusted. The refresh()
method updates the top-level roles in 'timestamp -> snapshot ->
root -> targets' order. For delegated metadata, the parent role is
updated before the delegated role. Taking into account that
'referenced_metadata' is updated and verified before 'metadata_role',
this method determines if 'metadata_role' has changed by checking
the 'meta' field of the newly updated 'referenced_metadata'.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'unclaimed'.
referenced_metadata:
This is the metadata that provides the role information for
'metadata_role'. For the top-level roles, the 'snapshot' role
is the referenced metadata for the 'root' and 'targets' roles.
The 'timestamp' metadata is always downloaded regardless. In
other words, it is updated by calling _update_metadata('timestamp')
and not by this method. The referenced metadata for 'snapshot'
is 'timestamp'. See refresh().
<Exceptions>
tuf.exceptions.NoWorkingMirrorError:
If 'metadata_role' could not be downloaded after determining that it
had changed.
tuf.exceptions.RepositoryError:
If the referenced metadata is missing.
<Side Effects>
If it is determined that 'metadata_role' has been updated, the metadata
store (i.e., self.metadata) is updated with the new metadata and the
affected stores modified (i.e., the previous metadata store is updated).
If the metadata is 'targets' or a delegated targets role, the role
database is updated with the new information, including its delegated
roles.
<Returns>
None.
"""
metadata_filename = metadata_role + '.json'
expected_versioninfo = None
# Ensure the referenced metadata has been loaded. The 'root' role may be
# updated without having 'snapshot' available.
if referenced_metadata not in self.metadata['current']:
raise tuf.exceptions.RepositoryError('Cannot update'
' ' + repr(metadata_role) + ' because ' + referenced_metadata + ' is'
' missing.')
# The referenced metadata has been loaded. Extract the new versioninfo for
# 'metadata_role' from it.
else:
logger.debug(repr(metadata_role) + ' referenced in ' +
repr(referenced_metadata)+ '. ' + repr(metadata_role) +
' may be updated.')
# Simply return if the metadata for 'metadata_role' has not been updated,
# according to the uncompressed metadata provided by the referenced
# metadata. The metadata is considered updated if its version number is
# strictly greater than its currently trusted version number.
expected_versioninfo = self.metadata['current'][referenced_metadata] \
['meta'][metadata_filename]
if not self._versioninfo_has_been_updated(metadata_filename,
expected_versioninfo):
logger.info(repr(metadata_filename) + ' up-to-date.')
# Since we have not downloaded a new version of this metadata, we should
# check to see if our local version is stale and notify the user if so.
# This raises tuf.exceptions.ExpiredMetadataError if the metadata we have
# is expired. Resolves issue #322.
self._ensure_not_expired(self.metadata['current'][metadata_role],
metadata_role)
# TODO: If 'metadata_role' is root or snapshot, we should verify that
# root's hash matches what's in snapshot, and that snapshot hash matches
# what's listed in timestamp.json.
return
logger.debug('Metadata ' + repr(metadata_filename) + ' has changed.')
# The file lengths of metadata are unknown, only their version numbers are
# known. Set an upper limit for the length of the downloaded file for each
# expected role. Note: The Timestamp role is not updated via this
# function.
if metadata_role == 'snapshot':
upperbound_filelength = tuf.settings.DEFAULT_SNAPSHOT_REQUIRED_LENGTH
elif metadata_role == 'root':
upperbound_filelength = DEFAULT_ROOT_UPPERLENGTH
# The metadata is considered Targets (or delegated Targets metadata).
else:
upperbound_filelength = tuf.settings.DEFAULT_TARGETS_REQUIRED_LENGTH
try:
self._update_metadata(metadata_role, upperbound_filelength,
expected_versioninfo['version'])
except Exception:
# The current metadata we have is not current but we couldn't get new
# metadata. We shouldn't use the old metadata anymore. This will get rid
# of in-memory knowledge of the role and delegated roles, but will leave
# delegated metadata files as current files on disk.
#
# TODO: Should we get rid of the delegated metadata files? We shouldn't
# need to, but we need to check the trust implications of the current
# implementation.
self._delete_metadata(metadata_role)
logger.error('Metadata for ' + repr(metadata_role) + ' cannot'
' be updated.')
raise
else:
# We need to import the delegated roles of 'metadata_role', since its
# list of delegations might have changed from what was previously
loaded.
# TODO: Should we remove the keys of the delegated roles?
self._import_delegations(metadata_role)
def _versioninfo_has_been_updated(self, metadata_filename, new_versioninfo):
"""
<Purpose>
Non-public method that determines whether the current versioninfo of
'metadata_filename' is less than 'new_versioninfo' (i.e., the version
number has been incremented). The 'new_versioninfo' argument should be
extracted from the latest copy of the metadata that references
'metadata_filename'. Example: 'root.json' would be referenced by
'snapshot.json'.
'new_versioninfo' should only be 'None' if this is for updating
'root.json' without having 'snapshot.json' available.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
new_versioninfo:
A dict object representing the new file information for
'metadata_filename'. 'new_versioninfo' may be 'None' when
updating 'root' without having 'snapshot' available. This
dict conforms to 'tuf.formats.VERSIONINFO_SCHEMA' and has
the form:
{'version': 288}
<Exceptions>
None.
<Side Effects>
If there is no versioninfo currently loaded for 'metadata_filename', try
to load it.
<Returns>
Boolean. True if the versioninfo has changed, False otherwise.
"""
# If there is no versioninfo currently stored for 'metadata_filename',
# try to load the file, calculate the versioninfo, and store it.
if metadata_filename not in self.versioninfo:
self._update_versioninfo(metadata_filename)
# Return true if there is no versioninfo for 'metadata_filename'.
# 'metadata_filename' is not in the 'self.versioninfo' store
# and it doesn't exist in the 'current' metadata location.
if self.versioninfo[metadata_filename] is None:
return True
current_versioninfo = self.versioninfo[metadata_filename]
logger.debug('New version for ' + repr(metadata_filename) +
': ' + repr(new_versioninfo['version']) + '. Old version: ' +
repr(current_versioninfo['version']))
if new_versioninfo['version'] > current_versioninfo['version']:
return True
else:
return False
def _update_versioninfo(self, metadata_filename):
"""
<Purpose>
Non-public method that updates the 'self.versioninfo' entry for the
metadata belonging to 'metadata_filename'. If the current metadata for
'metadata_filename' cannot be loaded, set its 'versioninfo' to 'None' to
signal that it is not in 'self.versioninfo' AND it also doesn't exist
locally.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
<Exceptions>
None.
<Side Effects>
The version number of 'metadata_filename' is calculated and stored in its
corresponding entry in 'self.versioninfo'.
<Returns>
None.
"""
# In case we delayed loading the metadata and didn't do it in
# __init__ (such as with delegated metadata), then get the version
# info now.
# Save the path to the current metadata file for 'metadata_filename'.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
# If the path is invalid, simply return and leave versioninfo unset.
if not os.path.exists(current_filepath):
self.versioninfo[metadata_filename] = None
return
# Extract the version information from the trusted snapshot role and save
# it to the 'self.versioninfo' store.
if metadata_filename == 'timestamp.json':
trusted_versioninfo = \
self.metadata['current']['timestamp']['version']
# When updating snapshot.json, the client either (1) has a copy of
# snapshot.json, or (2) is in the process of obtaining it by first
# downloading timestamp.json. Note: Clients are allowed to have only
# root.json initially, and perform a refresh of top-level metadata to
# obtain the remaining roles.
elif metadata_filename == 'snapshot.json':
# Use the version number of the currently trusted snapshot.json, taken
# from snapshot.json itself. Note that the version number specified in
# timestamp.json may be greater than the version in the client's copy
# of snapshot.json.
try:
snapshot_version_number = self.metadata['current']['snapshot']['version']
trusted_versioninfo = tuf.formats.make_versioninfo(
snapshot_version_number)
except KeyError:
trusted_versioninfo = \
self.metadata['current']['timestamp']['meta']['snapshot.json']
else:
try:
# The metadata file names in 'self.metadata' exclude the role
# extension. Strip the '.json' extension when checking if
# 'metadata_filename' currently exists.
targets_version_number = \
self.metadata['current'][metadata_filename[:-len('.json')]]['version']
trusted_versioninfo = \
tuf.formats.make_versioninfo(targets_version_number)
except KeyError:
trusted_versioninfo = \
self.metadata['current']['snapshot']['meta'][metadata_filename]
self.versioninfo[metadata_filename] = trusted_versioninfo
def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):
"""
<Purpose>
Non-public method that determines whether the current fileinfo of
'metadata_filename' differs from 'new_fileinfo'. The 'new_fileinfo'
argument should be extracted from the latest copy of the metadata that
references 'metadata_filename'. Example: 'root.json' would be referenced
by 'snapshot.json'.
'new_fileinfo' should only be 'None' if this is for updating 'root.json'
without having 'snapshot.json' available.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
new_fileinfo:
A dict object representing the new file information for
'metadata_filename'. 'new_fileinfo' may be 'None' when
updating 'root' without having 'snapshot' available. This
dict conforms to 'tuf.formats.FILEINFO_SCHEMA' and has
the form:
{'length': 23423,
'hashes': {'sha256': adfbc32343..}}
<Exceptions>
None.
<Side Effects>
If there is no fileinfo currently loaded for 'metadata_filename',
try to load it.
<Returns>
Boolean. True if the fileinfo has changed, False otherwise.
"""
# If there is no fileinfo currently stored for 'metadata_filename',
# try to load the file, calculate the fileinfo, and store it.
if metadata_filename not in self.fileinfo:
self._update_fileinfo(metadata_filename)
# Return true if there is no fileinfo for 'metadata_filename'.
# 'metadata_filename' is not in the 'self.fileinfo' store
# and it doesn't exist in the 'current' metadata location.
if self.fileinfo[metadata_filename] is None:
return True
current_fileinfo = self.fileinfo[metadata_filename]
if current_fileinfo['length'] != new_fileinfo['length']:
return True
# Now compare hashes. Note that the reason we can't just do a simple
# equality check on the fileinfo dicts is that we want to support the
# case where the hash algorithms listed in the metadata have changed
# without having that result in considering all files as needing to be
# updated, or not all hash algorithms listed can be calculated on the
# specific client.
for algorithm, hash_value in six.iteritems(new_fileinfo['hashes']):
# We're only looking for a single match. This isn't a security
# check, we just want to prevent unnecessary downloads.
if algorithm in current_fileinfo['hashes']:
if hash_value == current_fileinfo['hashes'][algorithm]:
return False
return True
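# Runnable sketch of the hash comparison above: fileinfo counts as
# unchanged if ANY hash algorithm present in both dicts yields a matching
# digest. The helper is illustrative, not part of the updater API.
def _any_shared_hash_matches(current_hashes, new_hashes):
  for algorithm, digest in new_hashes.items():
    if current_hashes.get(algorithm) == digest:
      return True
  return False

assert _any_shared_hash_matches({'sha256': 'abc'}, {'sha256': 'abc', 'sha512': 'xyz'})
assert not _any_shared_hash_matches({'sha256': 'abc'}, {'sha512': 'xyz'})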
def _update_fileinfo(self, metadata_filename):
"""
<Purpose>
Non-public method that updates the 'self.fileinfo' entry for the metadata
belonging to 'metadata_filename'. If the 'current' metadata for
'metadata_filename' cannot be loaded, set its 'fileinfo' to 'None' to
signal that it is not in the 'self.fileinfo' store AND it also doesn't exist
locally.
<Arguments>
metadata_filename:
The metadata filename for the role. For the 'root' role,
'metadata_filename' would be 'root.json'.
<Exceptions>
None.
<Side Effects>
The file details of 'metadata_filename' are calculated and
stored in 'self.fileinfo'.
<Returns>
None.
"""
# In case we delayed loading the metadata and didn't do it in
# __init__ (such as with delegated metadata), then get the file
# info now.
# Save the path to the current metadata file for 'metadata_filename'.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
# If the path is invalid, simply return and leave fileinfo unset.
if not os.path.exists(current_filepath):
self.fileinfo[metadata_filename] = None
return
# Extract the file information from the actual file and save it
# to the fileinfo store.
file_length, hashes = securesystemslib.util.get_file_details(
current_filepath)
metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)
self.fileinfo[metadata_filename] = metadata_fileinfo
def _move_current_to_previous(self, metadata_role):
"""
<Purpose>
Non-public method that moves the current metadata file for 'metadata_role'
to the previous directory.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
<Exceptions>
None.
<Side Effects>
The metadata file for 'metadata_role' is removed from 'current'
and moved to the 'previous' directory.
<Returns>
None.
"""
# Get the 'current' and 'previous' full file paths for 'metadata_role'
metadata_filepath = metadata_role + '.json'
previous_filepath = os.path.join(self.metadata_directory['previous'],
metadata_filepath)
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filepath)
# Remove the previous path if it exists.
if os.path.exists(previous_filepath):
os.remove(previous_filepath)
# Move the current path to the previous path.
if os.path.exists(current_filepath):
securesystemslib.util.ensure_parent_dir(previous_filepath)
os.rename(current_filepath, previous_filepath)
def _delete_metadata(self, metadata_role):
"""
<Purpose>
Non-public method that removes all (current) knowledge of 'metadata_role'.
The metadata belonging to 'metadata_role' is removed from the current
'self.metadata' store and from the role database. The 'root.json' role
file is never removed.
<Arguments>
metadata_role:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
<Exceptions>
None.
<Side Effects>
The role database is modified and the metadata for 'metadata_role'
removed from the 'self.metadata' store.
<Returns>
None.
"""
# The root metadata role is never deleted without a replacement.
if metadata_role == 'root':
return
# Get rid of the current metadata file.
self._move_current_to_previous(metadata_role)
# Remove knowledge of the role.
if metadata_role in self.metadata['current']:
del self.metadata['current'][metadata_role]
tuf.roledb.remove_role(metadata_role, self.repository_name)
def _ensure_not_expired(self, metadata_object, metadata_rolename):
"""
<Purpose>
Non-public method that raises an exception if the current specified
metadata has expired.
<Arguments>
metadata_object:
The metadata to check for expiration, a 'tuf.formats.ANYROLE_SCHEMA'
object.
metadata_rolename:
The name of the metadata. This is a role name and should not end
in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
<Exceptions>
tuf.exceptions.ExpiredMetadataError:
If 'metadata_rolename' has expired.
<Side Effects>
None.
<Returns>
None.
"""
# Extract the expiration time.
expires = metadata_object['expires']
# If the current time has surpassed the expiration date, raise an
# exception. 'expires' is in
# 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA' format (e.g.,
# '1985-10-21T01:22:00Z'.) Convert it to a unix timestamp and compare it
# against the current time.time() (also in Unix/POSIX time format, although
# with microseconds attached.)
current_time = int(time.time())
# Generate a user-friendly error message if 'expires' is earlier than
# the current time.
expires_datetime = iso8601.parse_date(expires)
expires_timestamp = tuf.formats.datetime_to_unix_timestamp(expires_datetime)
if expires_timestamp < current_time:
message = 'Metadata '+repr(metadata_rolename)+' expired on ' + \
expires_datetime.ctime() + ' (UTC).'
logger.error(message)
raise tuf.exceptions.ExpiredMetadataError(message)
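# Standalone sketch of the expiration rule above, assuming the iso8601
# and time modules imported by this file, and that
# tuf.formats.datetime_to_unix_timestamp performs essentially the
# conversion shown here.
import calendar

def _is_expired(expires):
  expires_datetime = iso8601.parse_date(expires)
  return calendar.timegm(expires_datetime.utctimetuple()) < int(time.time())

assert _is_expired('1985-10-21T01:22:00Z')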
def all_targets(self):
"""
<Purpose>
NOTE: This function is deprecated. Its behavior with regard to which
delegating Targets roles are trusted to determine how to validate a
delegated Targets role is NOT WELL DEFINED. Please transition to use of
get_one_valid_targetinfo()!
Get a list of the target information for all the trusted targets on the
repository. This list also includes all the targets of delegated roles.
Targets in the returned list are ordered according to the trusted order of
the delegated roles, where parent roles come before children. The list
conforms to 'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}
...]
<Arguments>
None.
<Exceptions>
tuf.exceptions.RepositoryError:
If the metadata for the 'targets' role is missing from
the 'snapshot' metadata.
tuf.exceptions.UnknownRoleError:
If one of the roles could not be found in the role database.
<Side Effects>
The metadata for target roles is updated and stored.
<Returns>
A list of targets, conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
"""
warnings.warn(
'Support for all_targets() will be removed in a future release.'
' get_one_valid_targetinfo() should be used instead.',
DeprecationWarning)
# Load the most up-to-date targets of the 'targets' role and all
# delegated roles.
self._refresh_targets_metadata(refresh_all_delegated_roles=True)
# Fetch the targets for the 'targets' role.
all_targets = self._targets_of_role('targets', skip_refresh=True)
# Fetch the targets of the delegated roles. get_rolenames returns
# all roles available on the repository.
delegated_targets = []
for role in tuf.roledb.get_rolenames(self.repository_name):
if role in ['root', 'snapshot', 'targets', 'timestamp']:
continue
else:
delegated_targets.extend(self._targets_of_role(role, skip_refresh=True))
all_targets.extend(delegated_targets)
return all_targets
def _refresh_targets_metadata(self, rolename='targets',
refresh_all_delegated_roles=False):
"""
<Purpose>
Non-public method that refreshes the targets metadata of 'rolename'. If
'refresh_all_delegated_roles' is True, include all the delegations that
follow 'rolename'. The metadata for the 'targets' role is updated in
refresh() by the _update_metadata_if_changed('targets') call, not here.
Delegated roles are not loaded when the repository is first initialized.
They are loaded from disk, updated if they have changed, and stored to
the 'self.metadata' store by this method. This method is called by
get_one_valid_targetinfo().
<Arguments>
rolename:
This is a delegated role name and should not end in '.json'. Example:
'unclaimed'.
refresh_all_delegated_roles:
Boolean indicating if all the delegated roles available in the
repository (via snapshot.json) should be refreshed.
<Exceptions>
tuf.exceptions.RepositoryError:
If the metadata file for the 'targets' role is missing from the
'snapshot' metadata.
<Side Effects>
The metadata for the delegated roles are loaded and updated if they
have changed. Delegated metadata is removed from the role database if
it has expired.
<Returns>
None.
"""
roles_to_update = []
if rolename + '.json' in self.metadata['current']['snapshot']['meta']:
roles_to_update.append(rolename)
if refresh_all_delegated_roles:
for role in six.iterkeys(self.metadata['current']['snapshot']['meta']):
# snapshot.json keeps track of root.json, targets.json, and delegated
# roles (e.g., django.json, unclaimed.json). Skip the 'root' and
# 'targets' roles here because they are updated elsewhere (e.g.,
# 'targets' in _update_metadata_if_changed('targets')).
if role.endswith('.json'):
role = role[:-len('.json')]
if role not in ['root', 'targets', rolename]:
roles_to_update.append(role)
else:
continue
# If there is nothing to refresh, we are done.
if not roles_to_update:
return
logger.debug('Roles to update: ' + repr(roles_to_update) + '.')
# Iterate 'roles_to_update', and load and update its metadata file if it
# has changed.
for rolename in roles_to_update:
self._load_metadata_from_file('previous', rolename)
self._load_metadata_from_file('current', rolename)
self._update_metadata_if_changed(rolename)
def _targets_of_role(self, rolename, targets=None, skip_refresh=False):
"""
<Purpose>
Non-public method that returns the target information of all the targets
of 'rolename'. The returned information is a list conformant to
'tuf.formats.TARGETINFOS_SCHEMA', and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}
...]
<Arguments>
rolename:
This is a role name and should not end in '.json'. Examples: 'targets',
'unclaimed'.
targets:
A list of targets containing target information, conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
skip_refresh:
A boolean indicating if the target metadata for 'rolename'
should be refreshed.
<Exceptions>
tuf.exceptions.UnknownRoleError:
If 'rolename' is not found in the role database.
<Side Effects>
The metadata for 'rolename' is refreshed if 'skip_refresh' is False.
<Returns>
A list of dict objects containing the target information of all the
targets of 'rolename'. Conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
"""
if targets is None:
targets = []
targets_of_role = list(targets)
logger.debug('Getting targets of role: ' + repr(rolename) + '.')
if not tuf.roledb.role_exists(rolename, self.repository_name):
raise tuf.exceptions.UnknownRoleError(rolename)
# We do not need to worry about the target paths being trusted because
# this is enforced before any new metadata is accepted.
if not skip_refresh:
self._refresh_targets_metadata(rolename)
# Do we have metadata for 'rolename'?
if rolename not in self.metadata['current']:
logger.debug('No metadata for ' + repr(rolename) + '.'
' Unable to determine targets.')
return []
# Get the targets specified by the role itself.
for filepath, fileinfo in six.iteritems(self.metadata['current'][rolename].get('targets', {})):
new_target = {}
new_target['filepath'] = filepath
new_target['fileinfo'] = fileinfo
targets_of_role.append(new_target)
return targets_of_role
def targets_of_role(self, rolename='targets'):
"""
<Purpose>
NOTE: This function is deprecated. Use with rolename 'targets' is secure
and the behavior well-defined, but use with any delegated targets role is
not. Please transition use for delegated targets roles to
get_one_valid_targetinfo(). More information is below.
Return a list of trusted targets directly specified by 'rolename'.
The returned information is a list conformant to
'tuf.formats.TARGETINFOS_SCHEMA', and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}
...]
The metadata of 'rolename' is updated if out of date, including the
metadata of its parent roles (i.e., the minimum roles needed to set the
chain of trust).
<Arguments>
rolename:
The name of the role whose list of targets are wanted.
The name of the role should start with 'targets'.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'rolename' is improperly formatted.
tuf.exceptions.RepositoryError:
If the metadata of 'rolename' cannot be updated.
tuf.exceptions.UnknownRoleError:
If 'rolename' is not found in the role database.
<Side Effects>
The metadata of updated delegated roles are downloaded and stored.
<Returns>
A list of targets, conformant to
'tuf.formats.TARGETINFOS_SCHEMA'.
"""
warnings.warn(
'Support for targets_of_role() will be removed in a future release.'
' get_one_valid_targetinfo() should be used instead.',
DeprecationWarning)
# Does 'rolename' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
tuf.formats.RELPATH_SCHEMA.check_match(rolename)
# If we've been given a delegated targets role, we don't know how to
# validate it without knowing what the delegating role is -- there could
# be several roles that delegate to the given role. Behavior of this
# function for roles other than Targets is not well defined as a result.
# This function is deprecated, but:
# - Usage of this function or a future successor makes sense when the
# role of interest is Targets, since we always know exactly how to
# validate Targets (We use root.).
# - Until it's removed (hopefully soon), we'll try to provide what it has
# always provided. To do this, we fetch and "validate" all delegated
# roles listed by snapshot. For delegated roles only, the order of the
# validation impacts the security of the validation -- the most-
# recently-validated role delegating to a role you are currently
# validating determines the expected keyids and threshold of the role
# you are currently validating. That is NOT GOOD. Again, please switch
# to get_one_valid_targetinfo, which is well-defined and secure.
if rolename != 'targets':
self._refresh_targets_metadata(refresh_all_delegated_roles=True)
if not tuf.roledb.role_exists(rolename, self.repository_name):
raise tuf.exceptions.UnknownRoleError(rolename)
return self._targets_of_role(rolename, skip_refresh=True)
def get_one_valid_targetinfo(self, target_filepath):
"""
<Purpose>
Return the target information for 'target_filepath', and update its
corresponding metadata, if necessary. 'target_filepath' must match
exactly as it appears in metadata, and should not contain URL encoding
escapes.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'target_filepath' is improperly formatted.
tuf.exceptions.UnknownTargetError:
If 'target_filepath' was not found.
Any other unforeseen runtime exception.
<Side Effects>
The metadata for updated delegated roles are downloaded and stored.
<Returns>
The target information for 'target_filepath', conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
"""
# Does 'target_filepath' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
tuf.formats.RELPATH_SCHEMA.check_match(target_filepath)
target_filepath = target_filepath.replace('\\', '/')
if target_filepath.startswith('/'):
raise tuf.exceptions.FormatError('The requested target file cannot'
' contain a leading path separator: ' + repr(target_filepath))
# Get target by looking at roles in order of priority tags.
target = self._preorder_depth_first_walk(target_filepath)
# Raise an exception if the target information could not be retrieved.
if target is None:
logger.error(repr(target_filepath) + ' not found.')
raise tuf.exceptions.UnknownTargetError(repr(target_filepath) + ' not'
' found.')
# Otherwise, return the found target.
else:
return target
def _preorder_depth_first_walk(self, target_filepath):
"""
<Purpose>
Non-public method that interrogates the tree of target delegations in
order of appearance (which implicitly orders trustworthiness), and returns
the matching target found in the most trusted role.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'target_filepath' is improperly formatted.
tuf.exceptions.RepositoryError:
If 'target_filepath' is not found.
<Side Effects>
The metadata for updated delegated roles are downloaded and stored.
<Returns>
The target information for 'target_filepath', conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
"""
target = None
current_metadata = self.metadata['current']
role_names = ['targets']
visited_role_names = set()
number_of_delegations = tuf.settings.MAX_NUMBER_OF_DELEGATIONS
# Ensure the client has the most up-to-date version of 'targets.json'.
# Raise 'tuf.exceptions.NoWorkingMirrorError' if the changed metadata
# cannot be successfully downloaded and 'tuf.exceptions.RepositoryError' if
# the referenced metadata is missing. Target methods such as this one are
# called after the top-level metadata have been refreshed (i.e.,
# updater.refresh()).
self._update_metadata_if_changed('targets')
# Preorder depth-first traversal of the graph of target delegations.
while target is None and number_of_delegations > 0 and len(role_names) > 0:
# Pop the role name from the top of the stack.
role_name = role_names.pop(-1)
# Skip any visited current role to prevent cycles.
if role_name in visited_role_names:
logger.debug('Skipping visited current role ' + repr(role_name))
continue
# The metadata for 'role_name' must be downloaded/updated before its
# targets, delegations, and child roles can be inspected.
# At this point, self.metadata['current'][role_name] may still be missing.
# _refresh_targets_metadata() does not refresh 'targets.json', it
# expects _update_metadata_if_changed() to have already refreshed it,
# which this function has checked above.
self._refresh_targets_metadata(role_name,
refresh_all_delegated_roles=False)
role_metadata = current_metadata[role_name]
targets = role_metadata['targets']
delegations = role_metadata.get('delegations', {})
child_roles = delegations.get('roles', [])
target = self._get_target_from_targets_role(role_name, targets,
target_filepath)
# After preorder check, add current role to set of visited roles.
visited_role_names.add(role_name)
# Also decrement the number of delegations left to visit.
number_of_delegations -= 1
if target is None:
child_roles_to_visit = []
# NOTE: This may be a slow operation if there are many delegated roles.
for child_role in child_roles:
child_role_name = self._visit_child_role(child_role, target_filepath)
if child_role['terminating'] and child_role_name is not None:
logger.debug('Adding child role ' + repr(child_role_name))
logger.debug('Not backtracking to other roles.')
role_names = []
child_roles_to_visit.append(child_role_name)
break
elif child_role_name is None:
logger.debug('Skipping child role ' + repr(child_role_name))
else:
logger.debug('Adding child role ' + repr(child_role_name))
child_roles_to_visit.append(child_role_name)
# Push 'child_roles_to_visit' in reverse order of appearance onto
# 'role_names'. Roles are popped from the end of the 'role_names'
# list.
child_roles_to_visit.reverse()
role_names.extend(child_roles_to_visit)
else:
logger.debug('Found target in current role ' + repr(role_name))
if target is None and number_of_delegations == 0 and len(role_names) > 0:
logger.debug(repr(len(role_names)) + ' roles left to visit, ' +
'but allowed to visit at most ' +
repr(tuf.settings.MAX_NUMBER_OF_DELEGATIONS) + ' delegations.')
return target
def _get_target_from_targets_role(self, role_name, targets, target_filepath):
"""
<Purpose>
Non-public method that determines whether the targets role with the given
'role_name' has the target with the name 'target_filepath'.
<Arguments>
role_name:
The name of the targets role that we are inspecting.
targets:
The targets of the Targets role with the name 'role_name'.
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The target information for 'target_filepath', conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
"""
# Does the current role name have our target?
logger.debug('Asking role ' + repr(role_name) + ' about'
' target ' + repr(target_filepath))
target = targets.get(target_filepath)
if target:
logger.debug('Found target ' + target_filepath + ' in role ' + role_name)
return {'filepath': target_filepath, 'fileinfo': target}
else:
logger.debug(
'Target file ' + target_filepath + ' not found in role ' + role_name)
return None
def _visit_child_role(self, child_role, target_filepath):
"""
<Purpose>
Non-public method that determines whether the given 'target_filepath'
is an allowed path of 'child_role'.
Ensure that we explore only delegated roles trusted with the target. The
metadata for 'child_role' should have been refreshed prior to this point,
however, the paths/targets that 'child_role' signs for have not been
verified (as intended). The paths/targets that 'child_role' is allowed
to specify in its metadata depends on the delegating role, and thus is
left to the caller to verify. We verify here that 'target_filepath'
is an allowed path according to the delegated 'child_role'.
TODO: Should the TUF spec restrict the repository to one particular
algorithm? Should we allow the repository to specify in the role
dictionary the algorithm used for these generated hashed paths?
<Arguments>
child_role:
The delegation targets role object of 'child_role', containing its
paths, path_hash_prefixes, keys, and so on.
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
If 'child_role' has been delegated the target with the name
'target_filepath', then we return the role name of 'child_role'.
Otherwise, we return None.
"""
child_role_name = child_role['name']
child_role_paths = child_role.get('paths')
child_role_path_hash_prefixes = child_role.get('path_hash_prefixes')
if child_role_path_hash_prefixes is not None:
target_filepath_hash = self._get_target_hash(target_filepath)
for child_role_path_hash_prefix in child_role_path_hash_prefixes:
if target_filepath_hash.startswith(child_role_path_hash_prefix):
return child_role_name
else:
continue
elif child_role_paths is not None:
# Is 'child_role_name' allowed to sign for 'target_filepath'?
for child_role_path in child_role_paths:
# A child role path may be an explicit path or glob pattern (Unix
# shell-style wildcards). The child role 'child_role_name' is returned
# if 'target_filepath' is equal to or matches 'child_role_path'.
# Explicit filepaths are also considered matches. A repo maintainer
# might delegate a glob pattern with a leading path separator, while
# the client requests a matching target without a leading path
# separator - make sure to strip any leading path separators so that a
# match is made. Example: "foo.tgz" should match with "/*.tgz".
if fnmatch.fnmatch(target_filepath.lstrip(os.sep), child_role_path.lstrip(os.sep)):
logger.debug('Child role ' + repr(child_role_name) + ' is allowed to'
' sign for ' + repr(target_filepath))
return child_role_name
else:
logger.debug(
'The given target path ' + repr(target_filepath) + ' does not'
' match the trusted path or glob pattern: ' + repr(child_role_path))
continue
else:
# 'child_role' should have been validated when it was downloaded.
# The 'paths' or 'path_hash_prefixes' fields should not be missing,
# so we raise a format error here in case they are both missing.
raise securesystemslib.exceptions.FormatError(repr(child_role_name) + ' '
'has neither a "paths" nor "path_hash_prefixes". At least'
' one of these attributes must be present.')
return None
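# Illustration of the glob matching used above, assuming a POSIX system
# (so os.sep is '/'); fnmatch and os are imported by this module. Leading
# separators are stripped from both the requested path and the delegated
# pattern before matching. Note that fnmatch's '*' also matches across
# '/' characters.
assert fnmatch.fnmatch('foo.tgz'.lstrip(os.sep), '/*.tgz'.lstrip(os.sep))
assert fnmatch.fnmatch('bar/foo.tgz', '*.tgz')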
def _get_target_hash(self, target_filepath, hash_function='sha256'):
"""
<Purpose>
Non-public method that computes the hash of 'target_filepath'. This is
useful in conjunction with the "path_hash_prefixes" attribute in a
delegated targets role, which tells us which paths it is implicitly
responsible for.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
hash_function:
The algorithm used by the repository to generate the hashes of the
target filepaths. The repository may optionally organize targets into
hashed bins to ease target delegations and role metadata management.
The use of consistent hashing allows for a uniform distribution of
targets into bins.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The hash of 'target_filepath'.
"""
# Calculate the hash of the filepath to determine which bin holds the
# target. The client currently assumes the repository (i.e., repository
# tool) uses 'hash_function' over UTF-8 encoded filepaths to generate hashes.
digest_object = securesystemslib.hash.digest(hash_function)
encoded_target_filepath = target_filepath.encode('utf-8')
digest_object.update(encoded_target_filepath)
target_filepath_hash = digest_object.hexdigest()
return target_filepath_hash
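# Equivalent standalone computation using hashlib, matching the sha256
# and UTF-8 defaults assumed above. The first characters of the digest
# are what "path_hash_prefixes" entries are compared against.
import hashlib

example_hash = hashlib.sha256('a/b/c.txt'.encode('utf-8')).hexdigest()
example_prefix = example_hash[:2]  # Compared against path_hash_prefixes.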
def remove_obsolete_targets(self, destination_directory):
"""
<Purpose>
Remove any files that are in 'previous' but not 'current'. This makes it
so if you remove a file from a repository, it actually goes away. The
targets for the 'targets' role and all delegated roles are checked.
<Arguments>
destination_directory:
The directory containing the target files tracked by TUF.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'destination_directory' is improperly formatted.
tuf.exceptions.RepositoryError:
If an error occurred removing any files.
<Side Effects>
Target files are removed from disk.
<Returns>
None.
"""
# Does 'destination_directory' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(destination_directory)
# Iterate the rolenames and verify whether the 'previous' directory
# contains a target no longer found in 'current'.
for role in tuf.roledb.get_rolenames(self.repository_name):
if role.startswith('targets'):
if role in self.metadata['previous'] and self.metadata['previous'][role] is not None:
for target in self.metadata['previous'][role]['targets']:
if target not in self.metadata['current'][role]['targets']:
# 'target' is only in 'previous', so remove it.
logger.warning('Removing obsolete file: ' + repr(target) + '.')
# Remove the file if it hasn't been removed already.
destination = \
os.path.join(destination_directory, target.lstrip(os.sep))
try:
os.remove(destination)
except OSError as e:
# If 'destination' was already removed, just log it.
if e.errno == errno.ENOENT:
logger.info('File ' + repr(destination) + ' was already'
' removed.')
else:
logger.error(str(e))
else:
logger.debug('Skipping: ' + repr(target) + '. It is still'
' a current target.')
else:
logger.debug('Skipping: ' + repr(role) + '. Not in the previous'
' metadata')
def updated_targets(self, targets, destination_directory):
"""
<Purpose>
Checks files in the provided directory against the provided file metadata.
Filters the provided target info, returning a subset: only the metadata
for targets for which the target file either does not exist in the
provided directory, or for which the target file in the provided directory
does not match the provided metadata.
A principal use of this function is to determine which target files need
to be downloaded. If the caller first uses get_one_valid_targetinfo()
calls to obtain up-to-date, valid metadata for targets, the caller can
then call updated_targets() to determine if that metadata does not match
what exists already on disk (in the provided directory). The returned
values can then be used in download_file() calls to update the files that
didn't exist or didn't match.
The returned information is a list conformant to
'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
[{'filepath': 'a/b/c.txt',
'fileinfo': {'length': 13323,
'hashes': {'sha256': dbfac345..}}
...]
<Arguments>
targets:
Metadata about the expected state of target files, against which local
files will be checked. This should be a list of target info
dictionaries; i.e. 'targets' must be conformant to
tuf.formats.TARGETINFOS_SCHEMA.
destination_directory:
The directory containing the target files.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
<Side Effects>
The files in 'targets' are read and their hashes computed.
<Returns>
A list of target info dictionaries. The list conforms to
'tuf.formats.TARGETINFOS_SCHEMA'.
This is a strict subset of the argument 'targets'.
"""
# Do the arguments have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
tuf.formats.TARGETINFOS_SCHEMA.check_match(targets)
securesystemslib.formats.PATH_SCHEMA.check_match(destination_directory)
# Keep track of the target objects and filepaths of updated targets.
# Return 'updated_targets' and use 'updated_targetpaths' to avoid
# duplicates.
updated_targets = []
updated_targetpaths = []
for target in targets:
# Prepend 'destination_directory' to the target's relative filepath (as
# stored in metadata.) Verify the hash of 'target_filepath' against
# each hash listed for its fileinfo. Note: join() discards
# 'destination_directory' if 'filepath' contains a leading path separator
# (i.e., is treated as an absolute path).
filepath = target['filepath']
if filepath[0] == '/':
filepath = filepath[1:]
target_filepath = os.path.join(destination_directory, filepath)
if target_filepath in updated_targetpaths:
continue
# Try one of the algorithm/digest combos for a mismatch. We break
# as soon as we find a mismatch.
for algorithm, digest in six.iteritems(target['fileinfo']['hashes']):
digest_object = None
try:
digest_object = securesystemslib.hash.digest_filename(target_filepath,
algorithm=algorithm)
# This exception would occur if the target does not exist locally.
except IOError:
updated_targets.append(target)
updated_targetpaths.append(target_filepath)
break
# The file does exist locally, check if its hash differs.
if digest_object.hexdigest() != digest:
updated_targets.append(target)
updated_targetpaths.append(target_filepath)
break
return updated_targets
def download_target(self, target, destination_directory):
"""
<Purpose>
Download 'target' and verify it is trusted.
This will only store the file at 'destination_directory' if the
downloaded file matches the description of the file in the trusted
metadata.
<Arguments>
target:
The target to be downloaded. Conformant to
'tuf.formats.TARGETINFO_SCHEMA'.
destination_directory:
The directory to save the downloaded target file.
<Exceptions>
securesystemslib.exceptions.FormatError:
If 'target' is not properly formatted.
tuf.exceptions.NoWorkingMirrorError:
If a target could not be downloaded from any of the mirrors.
Although expected to be rare, there might be OSError exceptions (except
errno.EEXIST) raised when creating the destination directory (if it
doesn't exist).
<Side Effects>
A target file is saved to the local system.
<Returns>
None.
"""
# Do the arguments have the correct format?
# This check ensures the arguments have the appropriate
# number of objects and object types, and that all dict
# keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
tuf.formats.TARGETINFO_SCHEMA.check_match(target)
securesystemslib.formats.PATH_SCHEMA.check_match(destination_directory)
# Extract the target file information.
target_filepath = target['filepath']
trusted_length = target['fileinfo']['length']
trusted_hashes = target['fileinfo']['hashes']
# '_get_target_file()' checks every mirror and returns the first target
# that passes verification.
target_file_object = self._get_target_file(target_filepath, trusted_length,
trusted_hashes)
# We acquired a target file object from a mirror. Move the file into place
# (i.e., locally to 'destination_directory'). Note: join() discards
# 'destination_directory' if 'target_path' contains a leading path
# separator (i.e., is treated as an absolute path).
destination = os.path.join(destination_directory,
target_filepath.lstrip(os.sep))
destination = os.path.abspath(destination)
target_dirpath = os.path.dirname(destination)
# When attempting to create the leaf directory of 'target_dirpath', ignore
# the exception raised if the directory already exists. All other
# exceptions potentially thrown by os.makedirs() are re-raised.
# Note: os.makedirs can raise OSError if the leaf directory already exists
# or cannot be created.
try:
os.makedirs(target_dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
target_file_object.move(destination)
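# End-to-end sketch of the client workflow implemented by the methods
# above, following the documented tuf.client.updater usage. The mirror
# configuration, repository name, target path, and directories are all
# placeholders; run this only against a real TUF repository.
if __name__ == '__main__':
  import tuf.client.updater
  import tuf.settings

  tuf.settings.repositories_directory = 'client_repositories'
  mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
      'metadata_path': 'metadata',
      'targets_path': 'targets',
      'confined_target_dirs': ['']}}
  updater = tuf.client.updater.Updater('example_repository', mirrors)
  # Refresh top-level metadata (timestamp -> snapshot -> root -> targets).
  updater.refresh()
  # Obtain trusted target info, then download only what is out of date.
  targetinfo = updater.get_one_valid_targetinfo('file1.txt')
  updated = updater.updated_targets([targetinfo], './targets')
  for target in updated:
    updater.download_target(target, './targets')
  updater.remove_obsolete_targets('./targets')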
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4126_0 |
crossvul-python_data_bad_4366_1 | """
Custom Authenticator to use Bitbucket OAuth with JupyterHub
"""
import json
import urllib
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set, default, observe
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}".format(access_token),
}
class BitbucketOAuthenticator(OAuthenticator):
_deprecated_aliases = {
"team_whitelist": ("allowed_teams", "0.12.0"),
}
@observe(*list(_deprecated_aliases))
def _deprecated_trait(self, change):
super()._deprecated_trait(change)
login_service = "Bitbucket"
client_id_env = 'BITBUCKET_CLIENT_ID'
client_secret_env = 'BITBUCKET_CLIENT_SECRET'
@default("authorize_url")
def _authorize_url_default(self):
return "https://bitbucket.org/site/oauth2/authorize"
@default("token_url")
def _token_url_default(self):
return "https://bitbucket.org/site/oauth2/access_token"
team_whitelist = Set(help="Deprecated, use `BitbucketOAuthenticator.allowed_teams`", config=True,)
allowed_teams = Set(
config=True, help="Automatically allow members of selected teams"
)
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}",
}
async def authenticate(self, handler, data=None):
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
grant_type="authorization_code",
code=code,
redirect_uri=self.get_callback_url(handler),
)
url = url_concat("https://bitbucket.org/site/oauth2/access_token", params)
bb_header = {"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"}
req = HTTPRequest(
url,
method="POST",
auth_username=self.client_id,
auth_password=self.client_secret,
body=urllib.parse.urlencode(params).encode('utf-8'),
headers=bb_header,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# Determine who the logged in user is
req = HTTPRequest(
"https://api.bitbucket.org/2.0/user",
method="GET",
headers=_api_headers(access_token),
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
# Check if user is a member of any allowed teams.
# This check is performed here, as the check requires `access_token`.
if self.allowed_teams:
user_in_team = await self._check_membership_allowed_teams(username, access_token)
if not user_in_team:
self.log.warning("%s not in team allowed list of users", username)
return None
return {
'name': username,
'auth_state': {'access_token': access_token, 'bitbucket_user': resp_json},
}
async def _check_membership_allowed_teams(self, username, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# We verify team membership by calling the teams endpoint.
next_page = url_concat(
"https://api.bitbucket.org/2.0/teams", {'role': 'member'}
)
while next_page:
req = HTTPRequest(next_page, method="GET", headers=headers)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
next_page = resp_json.get('next', None)
user_teams = set([entry["username"] for entry in resp_json["values"]])
# Check if any of the teams seen thus far are in the allowed list.
if len(self.allowed_teams & user_teams) > 0:
return True
return False
class LocalBitbucketOAuthenticator(LocalAuthenticator, BitbucketOAuthenticator):
"""A version that mixes in local system user creation"""
pass
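# Illustrative jupyterhub_config.py sketch (not part of the original
# module); the client id, secret, and team name are placeholders:
#
#   c.JupyterHub.authenticator_class = 'oauthenticator.bitbucket.BitbucketOAuthenticator'
#   c.BitbucketOAuthenticator.client_id = 'my-client-id'
#   c.BitbucketOAuthenticator.client_secret = 'my-client-secret'
#   c.BitbucketOAuthenticator.allowed_teams = {'my-team'}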
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4366_1 |
crossvul-python_data_bad_4366_5 | """
Custom Authenticator to use Google OAuth with JupyterHub.
Derived from the GitHub OAuth authenticator.
"""
import os
import json
import urllib.parse
from tornado import gen
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from tornado.auth import GoogleOAuth2Mixin
from tornado.web import HTTPError
from traitlets import Dict, Unicode, List, default, validate, observe
from jupyterhub.crypto import decrypt, EncryptionUnavailable, InvalidToken
from jupyterhub.auth import LocalAuthenticator
from jupyterhub.utils import url_path_join
from .oauth2 import OAuthLoginHandler, OAuthCallbackHandler, OAuthenticator
def check_user_in_groups(member_groups, allowed_groups):
# Check if user is a member of any group in the allowed groups
if any(g in member_groups for g in allowed_groups):
return True # user _is_ in group
else:
return False
class GoogleOAuthenticator(OAuthenticator, GoogleOAuth2Mixin):
_deprecated_aliases = {
"google_group_whitelist": ("allowed_google_groups", "0.12.0"),
}
@observe(*list(_deprecated_aliases))
def _deprecated_trait(self, change):
super()._deprecated_trait(change)
google_api_url = Unicode("https://www.googleapis.com", config=True)
@default('google_api_url')
def _google_api_url(self):
"""get default google apis url from env"""
google_api_url = os.getenv('GOOGLE_API_URL')
# default to googleapis.com
if not google_api_url:
google_api_url = 'https://www.googleapis.com'
return google_api_url
@default('scope')
def _scope_default(self):
return ['openid', 'email']
@default("authorize_url")
def _authorize_url_default(self):
return "https://accounts.google.com/o/oauth2/v2/auth"
@default("token_url")
def _token_url_default(self):
return "%s/oauth2/v4/token" % (self.google_api_url)
google_service_account_keys = Dict(
Unicode(),
help="Service account keys to use with each domain, see https://developers.google.com/admin-sdk/directory/v1/guides/delegation"
).tag(config=True)
gsuite_administrator = Dict(
Unicode(),
help="Username of a G Suite Administrator for the service account to act as"
).tag(config=True)
google_group_whitelist = Dict(help="Deprecated, use `GoogleOAuthenticator.allowed_google_groups`", config=True,)
allowed_google_groups = Dict(
List(Unicode()),
help="Automatically allow members of selected groups"
).tag(config=True)
admin_google_groups = Dict(
List(Unicode()),
help="Groups whose members should have Jupyterhub admin privileges"
).tag(config=True)
user_info_url = Unicode(
"https://www.googleapis.com/oauth2/v1/userinfo", config=True
)
hosted_domain = List(
Unicode(),
config=True,
help="""List of domains used to restrict sign-in, e.g. mycollege.edu""",
)
@default('hosted_domain')
def _hosted_domain_from_env(self):
domains = []
for domain in os.environ.get('HOSTED_DOMAIN', '').split(';'):
if domain:
# check falsy to avoid trailing separators
# adding empty domains
domains.append(domain)
return domains
@validate('hosted_domain')
def _cast_hosted_domain(self, proposal):
"""handle backward-compatibility with hosted_domain is a single domain as a string"""
if isinstance(proposal.value, str):
# pre-0.9 hosted_domain was a string
# set it to a single item list
# (or if it's empty, an empty list)
if proposal.value == '':
return []
return [proposal.value]
return proposal.value
login_service = Unicode(
os.environ.get('LOGIN_SERVICE', 'Google'),
config=True,
help="""Google Apps hosted domain string, e.g. My College""",
)
async def authenticate(self, handler, data=None, google_groups=None):
code = handler.get_argument("code")
body = urllib.parse.urlencode(
dict(
code=code,
redirect_uri=self.get_callback_url(handler),
client_id=self.client_id,
client_secret=self.client_secret,
grant_type="authorization_code",
)
)
http_client = AsyncHTTPClient()
response = await http_client.fetch(
self.token_url,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
body=body,
)
user = json.loads(response.body.decode("utf-8", "replace"))
access_token = str(user['access_token'])
refresh_token = user.get('refresh_token', None)
response = await http_client.fetch(
self.user_info_url + '?access_token=' + access_token
)
if not response:
handler.clear_all_cookies()
raise HTTPError(500, 'Google authentication failed')
bodyjs = json.loads(response.body.decode())
user_email = username = bodyjs['email']
user_email_domain = user_email.split('@')[1]
if not bodyjs['verified_email']:
self.log.warning("Google OAuth unverified email attempt: %s", user_email)
raise HTTPError(403, "Google email {} not verified".format(user_email))
if self.hosted_domain:
if user_email_domain not in self.hosted_domain:
self.log.warning(
"Google OAuth unauthorized domain attempt: %s", user_email
)
raise HTTPError(
403,
"Google account domain @{} not authorized.".format(
user_email_domain
),
)
if len(self.hosted_domain) == 1:
# unambiguous domain, use only base name
username = user_email.split('@')[0]
if refresh_token is None:
self.log.debug("Refresh token was empty, will try to pull refresh_token from previous auth_state")
user = handler.find_user(username)
if user:
self.log.debug("encrypted_auth_state was found, will try to decrypt and pull refresh_token from it")
try:
encrypted = user.encrypted_auth_state
auth_state = await decrypt(encrypted)
refresh_token = auth_state.get('refresh_token')
except (ValueError, InvalidToken, EncryptionUnavailable) as e:
self.log.warning(
"Failed to retrieve encrypted auth_state for %s because %s",
username,
e,
)
user_info = {
'name': username,
'auth_state': {
'access_token': access_token,
'refresh_token': refresh_token,
'google_user': bodyjs
}
}
if self.admin_google_groups or self.allowed_google_groups:
user_info = await self._add_google_groups_info(user_info, google_groups)
return user_info
def _service_client_credentials(self, scopes, user_email_domain):
"""
Return a configured service client credentials for the API.
"""
try:
from google.oauth2 import service_account
except ImportError:
raise ImportError(
"Could not import google.oauth2's service_account, "
"you may need to run pip install oauthenticator[googlegroups] or not declare google groups"
)
gsuite_administrator_email = "{}@{}".format(self.gsuite_administrator[user_email_domain], user_email_domain)
self.log.debug("scopes are %s, user_email_domain is %s", scopes, user_email_domain)
credentials = service_account.Credentials.from_service_account_file(
self.google_service_account_keys[user_email_domain],
scopes=scopes
)
credentials = credentials.with_subject(gsuite_administrator_email)
return credentials
def _service_client(self, service_name, service_version, credentials, http=None):
"""
Return a configured service client for the API.
"""
try:
from googleapiclient.discovery import build
except ImportError:
raise ImportError(
"Could not import googleapiclient.discovery's build, "
"you may need to run pip install oauthenticator[googlegroups] or not declare google groups"
)
self.log.debug("service_name is %s, service_version is %s", service_name, service_version)
return build(
serviceName=service_name,
version=service_version,
credentials=credentials,
cache_discovery=False,
http=http)
async def _google_groups_for_user(self, user_email, credentials, http=None):
"""
Return google groups a given user is a member of
"""
service = self._service_client(
service_name='admin',
service_version='directory_v1',
credentials=credentials,
http=http)
results = service.groups().list(userKey=user_email).execute()
results = [g['email'].split('@')[0] for g in results.get('groups', [])]
self.log.debug("user_email %s is a member of %s", user_email, results)
return results
async def _add_google_groups_info(self, user_info, google_groups=None):
user_email_domain=user_info['auth_state']['google_user']['hd']
user_email=user_info['auth_state']['google_user']['email']
if google_groups is None:
credentials = self._service_client_credentials(
scopes=['%s/auth/admin.directory.group.readonly' % (self.google_api_url)],
user_email_domain=user_email_domain)
google_groups = await self._google_groups_for_user(
user_email=user_email,
credentials=credentials)
user_info['auth_state']['google_user']['google_groups'] = google_groups
# Check if user is a member of any admin groups.
if self.admin_google_groups:
is_admin = check_user_in_groups(google_groups, self.admin_google_groups[user_email_domain])
# Check if user is a member of any allowed groups.
user_in_group = check_user_in_groups(google_groups, self.allowed_google_groups[user_email_domain])
if self.admin_google_groups and (is_admin or user_in_group):
user_info['admin'] = is_admin
return user_info
elif user_in_group:
return user_info
else:
return None
class LocalGoogleOAuthenticator(LocalAuthenticator, GoogleOAuthenticator):
"""A version that mixes in local system user creation"""
pass
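# Illustrative jupyterhub_config.py sketch (not part of the original
# module); the domain and group names are placeholders. Note that group
# checks also need google_service_account_keys and gsuite_administrator
# configured for the same domain:
#
#   c.JupyterHub.authenticator_class = 'oauthenticator.google.GoogleOAuthenticator'
#   c.GoogleOAuthenticator.hosted_domain = ['mycollege.edu']
#   c.GoogleOAuthenticator.allowed_google_groups = {'mycollege.edu': ['students']}
#   c.GoogleOAuthenticator.admin_google_groups = {'mycollege.edu': ['admins']}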
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4366_5 |
crossvul-python_data_bad_4366_3 | """
Authenticator to use GitHub OAuth with JupyterHub
"""
import json
import os
import re
import string
import warnings
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient, HTTPError
from jupyterhub.auth import LocalAuthenticator
from traitlets import List, Set, Unicode, default, observe
from .common import next_page_from_links
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "token {}".format(access_token),
}
class GitHubOAuthenticator(OAuthenticator):
# see github_scopes.md for details about scope config
# set scopes via config, e.g.
# c.GitHubOAuthenticator.scope = ['read:org']
_deprecated_aliases = {
"github_organization_whitelist": ("allowed_organizations", "0.12.0"),
}
@observe(*list(_deprecated_aliases))
def _deprecated_trait(self, change):
super()._deprecated_trait(change)
login_service = "GitHub"
github_url = Unicode("https://github.com", config=True)
@default("github_url")
def _github_url_default(self):
github_url = os.environ.get("GITHUB_URL")
if not github_url:
# fallback on older GITHUB_HOST config,
# treated the same as GITHUB_URL
host = os.environ.get("GITHUB_HOST")
if host:
if os.environ.get("GITHUB_HTTP"):
protocol = "http"
warnings.warn(
'Use of GITHUB_HOST with GITHUB_HTTP might be deprecated in the future. '
'Use GITHUB_URL=http://{} to set host and protocol together.'.format(
host
),
PendingDeprecationWarning,
)
else:
protocol = "https"
github_url = "{}://{}".format(protocol, host)
if github_url:
if '://' not in github_url:
# ensure protocol is included, assume https if missing
github_url = 'https://' + github_url
return github_url
else:
# nothing specified, this is the true default
github_url = "https://github.com"
# ensure no trailing slash
return github_url.rstrip("/")
github_api = Unicode("https://api.github.com", config=True)
@default("github_api")
def _github_api_default(self):
if self.github_url == "https://github.com":
return "https://api.github.com"
else:
return self.github_url + "/api/v3"
@default("authorize_url")
def _authorize_url_default(self):
return "%s/login/oauth/authorize" % (self.github_url)
@default("token_url")
def _token_url_default(self):
return "%s/login/oauth/access_token" % (self.github_url)
# deprecated names
github_client_id = Unicode(config=True, help="DEPRECATED")
def _github_client_id_changed(self, name, old, new):
self.log.warning("github_client_id is deprecated, use client_id")
self.client_id = new
github_client_secret = Unicode(config=True, help="DEPRECATED")
def _github_client_secret_changed(self, name, old, new):
self.log.warning("github_client_secret is deprecated, use client_secret")
self.client_secret = new
client_id_env = 'GITHUB_CLIENT_ID'
client_secret_env = 'GITHUB_CLIENT_SECRET'
github_organization_whitelist = Set(help="Deprecated, use `GitHubOAuthenticator.allowed_organizations`", config=True,)
allowed_organizations = Set(
config=True, help="Automatically allow members of selected organizations"
)
async def authenticate(self, handler, data=None):
"""We set up auth_state based on additional GitHub info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitHub Access Token
#
# See: https://developer.github.com/v3/oauth/
# GitHub specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id, client_secret=self.client_secret, code=code
)
url = url_concat(self.token_url, params)
req = HTTPRequest(
url,
method="POST",
headers={"Accept": "application/json"},
body='', # Body is required for a POST...
validate_cert=self.validate_server_cert,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
if 'access_token' in resp_json:
access_token = resp_json['access_token']
elif 'error_description' in resp_json:
raise HTTPError(
403,
"An access token was not returned: {}".format(
resp_json['error_description']
),
)
else:
raise HTTPError(500, "Bad response: {}".format(resp))
# Determine who the logged in user is
req = HTTPRequest(
self.github_api + "/user",
method="GET",
headers=_api_headers(access_token),
validate_cert=self.validate_server_cert,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["login"]
# username is now the GitHub userid.
if not username:
return None
# Check if user is a member of any allowed organizations.
# This check is performed here, as it requires `access_token`.
if self.allowed_organizations:
for org in self.allowed_organizations:
user_in_org = await self._check_membership_allowed_organizations(
org, username, access_token
)
if user_in_org:
break
else: # User not found in member list for any organisation
self.log.warning("User %s is not in allowed org list", username)
return None
userdict = {"name": username}
# Now we set up auth_state
userdict["auth_state"] = auth_state = {}
# Save the access token and full GitHub reply (name, id, email) in auth state
# These can be used for user provisioning in the Lab/Notebook environment.
# e.g.
# 1) stash the access token
# 2) use the GitHub ID as the id
# 3) set up name/email for .gitconfig
auth_state['access_token'] = access_token
# store the whole user model in auth_state.github_user
auth_state['github_user'] = resp_json
# A public email address will be returned by the initial query (assuming
# the default scope); a private one will not.
return userdict
async def _check_membership_allowed_organizations(self, org, username, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check membership of user `username` for organization `org` via api [check-membership](https://developer.github.com/v3/orgs/members/#check-membership)
# With an empty scope (even if authenticated by an org member), this
# will only see public org members. You need the 'read:org' scope in
# order to check membership for all members.
check_membership_url = "%s/orgs/%s/members/%s" % (
self.github_api,
org,
username,
)
req = HTTPRequest(
check_membership_url,
method="GET",
headers=headers,
validate_cert=self.validate_server_cert,
)
self.log.debug(
"Checking GitHub organization membership: %s in %s?", username, org
)
resp = await http_client.fetch(req, raise_error=False)
if resp.code == 204:
self.log.info("Allowing %s as member of %s", username, org)
return True
else:
try:
resp_json = json.loads((resp.body or b'').decode('utf8', 'replace'))
message = resp_json.get('message', '')
except ValueError:
message = ''
self.log.debug(
"%s does not appear to be a member of %s (status=%s): %s",
username,
org,
resp.code,
message,
)
return False
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
"""A version that mixes in local system user creation"""
pass
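# Illustrative jupyterhub_config.py sketch (not part of the original
# module); the organization name is a placeholder. Membership checks for
# organizations with private members need the 'read:org' scope (see
# github_scopes.md):
#
#   c.JupyterHub.authenticator_class = 'oauthenticator.github.GitHubOAuthenticator'
#   c.GitHubOAuthenticator.scope = ['read:org']
#   c.GitHubOAuthenticator.allowed_organizations = {'my-org'}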
| ./CrossVul/dataset_final_sorted/CWE-863/py/bad_4366_3 |
crossvul-python_data_good_1730_0 | # -*- coding: utf-8 -*-
'''
Management of user accounts
===========================
The user module is used to create and manage user settings, users can be set
as either absent or present
.. code-block:: yaml
fred:
user.present:
- fullname: Fred Jones
- shell: /bin/zsh
- home: /home/fred
- uid: 4000
- gid: 4000
- groups:
- wheel
- storage
- games
testuser:
user.absent
'''
# Import python libs
import logging
import os
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def _group_changes(cur, wanted, remove=False):
'''
Determine if the groups need to be changed
'''
old = set(cur)
new = set(wanted)
if (remove and old != new) or (not remove and not new.issubset(old)):
return True
return False
def _changes(name,
uid=None,
gid=None,
groups=None,
optional_groups=None,
remove_groups=True,
home=None,
createhome=True,
password=None,
enforce_password=True,
empty_password=False,
shell=None,
fullname='',
roomnumber='',
workphone='',
homephone='',
date=0,
mindays=0,
maxdays=999999,
inactdays=0,
warndays=7,
expire=-1):
'''
Return a dict of the changes required for a user if the user is present,
otherwise return False.
Updated in 2014.7.0 to include support for shadow attributes, all
attributes supported as integers only.
'''
if 'shadow.info' in __salt__:
lshad = __salt__['shadow.info'](name)
lusr = __salt__['user.info'](name)
if not lusr:
return False
change = {}
wanted_groups = sorted(set((groups or []) + (optional_groups or [])))
if uid:
if lusr['uid'] != uid:
change['uid'] = uid
if gid is not None:
if lusr['gid'] not in (gid, __salt__['file.group_to_gid'](gid)):
change['gid'] = gid
default_grp = __salt__['file.gid_to_group'](
gid if gid is not None else lusr['gid']
)
# remove the default group from the list for comparison purposes
if default_grp in lusr['groups']:
lusr['groups'].remove(default_grp)
if name in lusr['groups'] and name not in wanted_groups:
lusr['groups'].remove(name)
# remove default group from wanted_groups, as this requirement is
# already met
if default_grp in wanted_groups:
wanted_groups.remove(default_grp)
if _group_changes(lusr['groups'], wanted_groups, remove_groups):
change['groups'] = wanted_groups
if home:
if lusr['home'] != home:
change['home'] = home
if createhome:
newhome = home if home else lusr['home']
if newhome is not None and not os.path.isdir(newhome):
change['homeDoesNotExist'] = newhome
if shell:
if lusr['shell'] != shell:
change['shell'] = shell
if 'shadow.info' in __salt__ and 'shadow.default_hash' in __salt__:
if password:
default_hash = __salt__['shadow.default_hash']()
if lshad['passwd'] == default_hash \
or lshad['passwd'] != default_hash and enforce_password:
if lshad['passwd'] != password:
change['passwd'] = password
if date and date != 0 and lshad['lstchg'] != date:
change['date'] = date
if mindays and mindays != 0 and lshad['min'] != mindays:
change['mindays'] = mindays
if maxdays and maxdays != 999999 and lshad['max'] != maxdays:
change['maxdays'] = maxdays
if inactdays and inactdays != 0 and lshad['inact'] != inactdays:
change['inactdays'] = inactdays
if warndays and warndays != 7 and lshad['warn'] != warndays:
change['warndays'] = warndays
if expire and expire != -1 and lshad['expire'] != expire:
change['expire'] = expire
# GECOS fields
if fullname is not None and lusr['fullname'] != fullname:
change['fullname'] = fullname
# MacOS doesn't have full GECOS support, so check for the "ch" functions
# and ignore these parameters if these functions do not exist.
if 'user.chroomnumber' in __salt__:
if roomnumber is not None and lusr['roomnumber'] != roomnumber:
change['roomnumber'] = roomnumber
if 'user.chworkphone' in __salt__:
if workphone is not None and lusr['workphone'] != workphone:
change['workphone'] = workphone
if 'user.chhomephone' in __salt__:
if homephone is not None and lusr['homephone'] != homephone:
change['homephone'] = homephone
return change
def present(name,
uid=None,
gid=None,
gid_from_name=False,
groups=None,
optional_groups=None,
remove_groups=True,
home=None,
createhome=True,
password=None,
enforce_password=True,
empty_password=False,
shell=None,
unique=True,
system=False,
fullname=None,
roomnumber=None,
workphone=None,
homephone=None,
date=None,
mindays=None,
maxdays=None,
inactdays=None,
warndays=None,
expire=None):
'''
Ensure that the named user is present with the specified properties
name
The name of the user to manage
uid
The user id to assign, if left empty then the next available user id
will be assigned
gid
The default group id
gid_from_name
If True, the default group id will be set to the id of the group with
the same name as the user.
groups
A list of groups to assign the user to, pass a list object. If a group
specified here does not exist on the minion, the state will fail.
If set to the empty list, the user will be removed from all groups
except the default group.
optional_groups
A list of groups to assign the user to, pass a list object. If a group
specified here does not exist on the minion, the state will silently
ignore it.
NOTE: If the same group is specified in both "groups" and
"optional_groups", then it will be assumed to be required and not optional.
remove_groups
Remove groups that the user is a member of that weren't specified in
the state, True by default
home
The custom login directory of the user. Uses the default value of the
underlying system if not set. Note that this directory does not have to
exist. This is also the location of the home directory that will be
created if createhome is set to True.
createhome
If True, the home directory will be created if it doesn't exist.
Please note that directories leading up to the home directory
will NOT be created.
password
A password hash to set for the user. This field is only supported on
Linux, FreeBSD, NetBSD, OpenBSD, and Solaris.
.. versionchanged:: 0.16.0
BSD support added.
enforce_password
Set to False to keep the password from being changed if it has already
been set and the password hash differs from what is specified in the
"password" field. This option will be ignored if "password" is not
specified.
empty_password
Set to True to enable password-less login for the user
shell
The login shell, defaults to the system default shell
unique
Require a unique UID, True by default
system
Choose UID in the range of FIRST_SYSTEM_UID and LAST_SYSTEM_UID.
User comment field (GECOS) support (currently Linux, FreeBSD, and MacOS
only):
The below values should be specified as strings to avoid ambiguities when
the values are loaded. (Especially the phone and room number fields which
are likely to contain numeric data)
fullname
The user's full name
roomnumber
The user's room number (not supported in MacOS)
workphone
The user's work phone number (not supported in MacOS)
homephone
The user's home phone number (not supported in MacOS)
.. versionchanged:: 2014.7.0
Shadow attribute support added.
Shadow attributes support (currently Linux only):
The below values should be specified as integers.
date
Date of last change of password, represented in days since epoch
(January 1, 1970).
mindays
The minimum number of days between password changes.
maxdays
The maximum number of days between password changes.
inactdays
The number of days after a password expires before an account is
locked.
warndays
Number of days prior to maxdays to warn users.
expire
Date that account expires, represented in days since epoch (January 1,
1970).
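As an illustration (all values below are placeholders), a state that
manages the shadow attributes described above might look like:
.. code-block:: yaml
    fred:
      user.present:
        - password: '$6$examplesalt$examplehash'
        - enforce_password: True
        - maxdays: 90
        - warndays: 7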
'''
fullname = salt.utils.sdecode(fullname) if fullname is not None else fullname
roomnumber = salt.utils.sdecode(roomnumber) if roomnumber is not None else roomnumber
workphone = salt.utils.sdecode(workphone) if workphone is not None else workphone
homephone = salt.utils.sdecode(homephone) if homephone is not None else homephone
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0} is present and up to date'.format(name)}
if groups:
missing_groups = [x for x in groups if not __salt__['group.info'](x)]
if missing_groups:
ret['comment'] = 'The following group(s) are not present: ' \
'{0}'.format(','.join(missing_groups))
ret['result'] = False
return ret
if optional_groups:
present_optgroups = [x for x in optional_groups
if __salt__['group.info'](x)]
for missing_optgroup in [x for x in optional_groups
if x not in present_optgroups]:
log.debug('Optional group "{0}" for user "{1}" is not '
'present'.format(missing_optgroup, name))
else:
present_optgroups = None
# Log a warning for all groups specified in both "groups" and
# "optional_groups" lists.
if groups and optional_groups:
for isected in set(groups).intersection(optional_groups):
log.warning('Group "{0}" specified in both groups and '
'optional_groups for user {1}'.format(isected, name))
if gid_from_name:
gid = __salt__['file.group_to_gid'](name)
if empty_password:
__salt__['shadow.del_password'](name)
changes = _changes(name,
uid,
gid,
groups,
present_optgroups,
remove_groups,
home,
createhome,
password,
enforce_password,
empty_password,
shell,
fullname,
roomnumber,
workphone,
homephone,
date,
mindays,
maxdays,
inactdays,
warndays,
expire)
if changes:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('The following user attributes are set to be '
'changed:\n')
for key, val in changes.items():
if key == 'password':
val = 'XXX-REDACTED-XXX'
ret['comment'] += '{0}: {1}\n'.format(key, val)
return ret
# The user is present
if 'shadow.info' in __salt__:
lshad = __salt__['shadow.info'](name)
pre = __salt__['user.info'](name)
for key, val in changes.items():
if key == 'passwd' and not empty_password:
__salt__['shadow.set_password'](name, password)
continue
if key == 'date':
__salt__['shadow.set_date'](name, date)
continue
# run chhome once to avoid any possible bad side-effect
if key == 'home' and 'homeDoesNotExist' not in changes:
__salt__['user.chhome'](name, val, False)
continue
if key == 'homeDoesNotExist':
__salt__['user.chhome'](name, val, True)
if not os.path.isdir(val):
__salt__['file.mkdir'](val, pre['uid'], pre['gid'], 0755)
continue
if key == 'mindays':
__salt__['shadow.set_mindays'](name, mindays)
continue
if key == 'maxdays':
__salt__['shadow.set_maxdays'](name, maxdays)
continue
if key == 'inactdays':
__salt__['shadow.set_inactdays'](name, inactdays)
continue
if key == 'warndays':
__salt__['shadow.set_warndays'](name, warndays)
continue
if key == 'expire':
__salt__['shadow.set_expire'](name, expire)
continue
if key == 'groups':
__salt__['user.ch{0}'.format(key)](
name, val, not remove_groups
)
else:
__salt__['user.ch{0}'.format(key)](name, val)
post = __salt__['user.info'](name)
spost = {}
if 'shadow.info' in __salt__:
if lshad['passwd'] != password:
spost = __salt__['shadow.info'](name)
# See if anything changed
for key in post:
if post[key] != pre[key]:
ret['changes'][key] = post[key]
if 'shadow.info' in __salt__:
for key in spost:
if lshad[key] != spost[key]:
ret['changes'][key] = spost[key]
if ret['changes']:
ret['comment'] = 'Updated user {0}'.format(name)
changes = _changes(name,
uid,
gid,
groups,
present_optgroups,
remove_groups,
home,
createhome,
password,
enforce_password,
empty_password,
shell,
fullname,
roomnumber,
workphone,
homephone,
date,
mindays,
maxdays,
inactdays,
warndays,
expire)
if changes:
ret['comment'] = 'These values could not be changed: {0}'.format(
changes
)
ret['result'] = False
return ret
if changes is False:
# The user is not present, make it!
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} set to be added'.format(name)
return ret
if groups and present_optgroups:
groups.extend(present_optgroups)
elif present_optgroups:
groups = present_optgroups[:]
if __salt__['user.add'](name,
uid=uid,
gid=gid,
groups=groups,
home=home,
shell=shell,
unique=unique,
system=system,
fullname=fullname,
roomnumber=roomnumber,
workphone=workphone,
homephone=homephone,
createhome=createhome):
ret['comment'] = 'New user {0} created'.format(name)
ret['changes'] = __salt__['user.info'](name)
if 'shadow.info' in __salt__ and not salt.utils.is_windows():
if password and not empty_password:
__salt__['shadow.set_password'](name, password)
spost = __salt__['shadow.info'](name)
if spost['passwd'] != password:
ret['comment'] = 'User {0} created but failed to set' \
' password to' \
' {1}'.format(name, 'XXX-REDACTED-XXX')
ret['result'] = False
ret['changes']['password'] = 'XXX-REDACTED-XXX'
if date:
__salt__['shadow.set_date'](name, date)
spost = __salt__['shadow.info'](name)
if spost['lstchg'] != date:
ret['comment'] = 'User {0} created but failed to set' \
' last change date to' \
' {1}'.format(name, date)
ret['result'] = False
ret['changes']['date'] = date
if mindays:
__salt__['shadow.set_mindays'](name, mindays)
spost = __salt__['shadow.info'](name)
if spost['min'] != mindays:
ret['comment'] = 'User {0} created but failed to set' \
' minimum days to' \
' {1}'.format(name, mindays)
ret['result'] = False
ret['changes']['mindays'] = mindays
if maxdays:
__salt__['shadow.set_maxdays'](name, maxdays)
spost = __salt__['shadow.info'](name)
if spost['max'] != maxdays:
ret['comment'] = 'User {0} created but failed to set' \
' maximum days to' \
' {1}'.format(name, maxdays)
ret['result'] = False
ret['changes']['maxdays'] = maxdays
if inactdays:
__salt__['shadow.set_inactdays'](name, inactdays)
spost = __salt__['shadow.info'](name)
if spost['inact'] != inactdays:
ret['comment'] = 'User {0} created but failed to set' \
' inactive days to' \
' {1}'.format(name, inactdays)
ret['result'] = False
ret['changes']['inactdays'] = inactdays
if warndays:
__salt__['shadow.set_warndays'](name, warndays)
spost = __salt__['shadow.info'](name)
if spost['warn'] != warndays:
ret['comment'] = 'User {0} created but failed to set' \
' warn days to' \
' {1}'.format(name, warndays)
ret['result'] = False
ret['changes']['warndays'] = warndays
if expire:
__salt__['shadow.set_expire'](name, expire)
spost = __salt__['shadow.info'](name)
if spost['expire'] != expire:
ret['comment'] = 'User {0} created but failed to set' \
' expire days to' \
' {1}'.format(name, expire)
ret['result'] = False
ret['changes']['expire'] = expire
elif salt.utils.is_windows():
if password and not empty_password:
if not __salt__['user.setpassword'](name, password):
ret['comment'] = 'User {0} created but failed to set' \
' password to' \
' {1}'.format(name, 'XXX-REDACTED-XXX')
ret['result'] = False
ret['changes']['passwd'] = 'XXX-REDACTED-XXX'
else:
ret['comment'] = 'Failed to create new user {0}'.format(name)
ret['result'] = False
return ret
def absent(name, purge=False, force=False):
'''
Ensure that the named user is absent
name
The name of the user to remove
purge
Set purge to delete all of the user's files as well as the user
force
If the user is logged in the absent state will fail, set the force
option to True to remove the user even if they are logged in. Not
supported in FreeBSD and Solaris.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
lusr = __salt__['user.info'](name)
if lusr:
# The user is present, make it not present
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} set for removal'.format(name)
return ret
beforegroups = set(salt.utils.get_group_list(name))
ret['result'] = __salt__['user.delete'](name, purge, force)
aftergroups = set([g for g in beforegroups if __salt__['group.info'](g)])
if ret['result']:
ret['changes'] = {}
for g in beforegroups - aftergroups:
ret['changes']['{0} group'.format(g)] = 'removed'
ret['changes'][name] = 'removed'
ret['comment'] = 'Removed user {0}'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to remove user {0}'.format(name)
return ret
ret['comment'] = 'User {0} is not present'.format(name)
return ret
| ./CrossVul/dataset_final_sorted/CWE-534/py/good_1730_0 |
crossvul-python_data_good_5519_0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2015, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to _main() is at the end of the file.
def _cli_parse(args): # pragma: no coverage
from argparse import ArgumentParser
parser = ArgumentParser(usage="%(prog)s [options] package.module:app")
opt = parser.add_argument
opt('app', help='WSGI app entry point.')
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
cli_args = parser.parse_args(args[1:])  # skip the program name in argv[0]
return cli_args, parser
def _cli_patch(cli_args): # pragma: no coverage
parsed_args, _ = _cli_parse(cli_args)
opts = parsed_args
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ##########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings, weakref, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
from json import dumps as json_dumps, loads as json_lds
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
py3k = sys.version_info.major > 2
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (which restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
exec(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
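# Illustrative example (not part of the original module); 'render' is a
# placeholder for an expensive computation:
#
#   class Page(object):
#       @cached_property
#       def content(self):
#           return render()  # computed once, then stored on the instance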
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events #######################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
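# A few illustrative inputs and outputs:
#   _re_flatten(r'/user/(\d+)')   -> r'/user/(?:\d+)'
#   _re_flatten(r'(?P<id>\d+)/x') -> r'(?:\d+)/x'
#   _re_flatten(r'\(literal\)')   -> r'\(literal\)'  (escaped parens untouched)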
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
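# Usage sketch (illustrative; the filter name and pattern are made up):
# register a 'slug' filter that matches dash-separated lowercase words
# and needs no to_python/to_url conversion.
#
#   app = Bottle()
#   app.router.add_filter('slug',
#                         lambda conf: (r'[a-z0-9]+(?:-[a-z0-9]+)*', None, None))
#
#   @app.route('/posts/<title:slug>')
#   def show_post(title):
#       return 'Post: ' + title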
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
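# Example (illustrative): after router.add('/wiki/<page>', 'GET', target,
# name='wikipage'), calling router.build('wikipage', page='Main_Page')
# returns '/wiki/Main_Page'; surplus keyword arguments become the query
# string.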
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turing an URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = app.config._make_overlay()
self.config.load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
depr(0, 13, "Route.get_config() is deprectated.",
"The Route.config property already includes values from the"
" application config for missing keys. Access it directly.")
return self.config.get(key, default)
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
@lazy_attribute
def _global_config(cls):
cfg = ConfigDict()
cfg.meta_set('catchall', 'validate', bool)
return cfg
def __init__(self, **kwargs):
#: A :class:`ConfigDict` for app specific configuration.
self.config = self._global_config._make_overlay()
self.config._add_change_listener(
functools.partial(self.trigger_hook, 'config'))
self.config.update({
"catchall": True
})
if kwargs.get('catchall') is False:
depr(0,13, "Bottle(catchall) keyword argument.",
"The 'catchall' setting is now part of the app "
"configuration. Fix: `app.config['catchall'] = False`")
self.config['catchall'] = False
if kwargs.get('autojson') is False:
depr(0, 13, "Bottle(autojson) keyword argument.",
"The 'autojson' setting is now part of the app "
"configuration. Fix: `app.config['json.enable'] = False`")
self.config['json.disable'] = True
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = {'after_request'}
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
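# Usage sketch (illustrative; relies on the module-level 'request' object
# defined further down in this file):
#
#   app = Bottle()
#
#   @app.hook('before_request')
#   def strip_trailing_slash():
#       request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')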
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
            While it is possible to use path wildcards within the prefix path
            (:class:`Bottle` children only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
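
            Example (a sketch; the child app keeps its own plugins and
            configuration)::

                child = Bottle()

                @child.route('/hello')
                def hello():
                    return 'Hello from the child app'

                parent_app.mount('/child/', child)  # now serves /child/hello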
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
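
            Example (a minimal sketch of a decorator-style plugin)::

                def stopwatch(callback):
                    def wrapper(*args, **kwargs):
                        start = time.time()
                        body = callback(*args, **kwargs)
                        response.set_header('X-Exec-Time', str(time.time() - start))
                        return body
                    return wrapper

                app.install(stopwatch)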
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
    def error(self, code=500):
        """ Register an output handler for an HTTP error code. Works as
            a decorator.
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res, template_settings=dict(name='__ERROR_PAGE_TEMPLATE')))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
while True: # Remove in 0.14 together with RouteReset
out = None
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
out = route.call(**args)
break
except HTTPResponse as E:
out = E
break
except RouteReset:
depr(0, 13, "RouteReset exception deprecated",
"Call route.call() after route.reset() and "
"return the result.")
route.reset()
continue
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
try:
self.trigger_hook('after_request')
except HTTPResponse as E:
out = E
out.apply(response)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
environ['wsgi.errors'].flush()
out = HTTPError(500, "Internal Server Error", E, stacktrace)
out.apply(response)
return out
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
environ['wsgi.errors'].flush()
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
            should not be confused with the "URL wildcards" provided by the
            :class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
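
            Example (a sketch)::

                @app.post('/echo')
                def echo():
                    return {'received': request.json}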
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
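
            Example (a sketch of the shift semantics)::

                # SCRIPT_NAME='/a'   PATH_INFO='/b/c'
                request.path_shift(1)
                # SCRIPT_NAME='/a/b' PATH_INFO='/c'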
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
    def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = value if isinstance(value, unicode) else str(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
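
        Example (a minimal sketch)::

            rs = BaseResponse('Not here', 404, content_type='text/plain')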
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: frozenset(('Content-Type', 'Content-Length')),
304: frozenset(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
    @property
    def charset(self):
        """ Return the charset specified in the Content-Type header
            (default: UTF-8). """
        # Property getters only receive `self`; a `default` parameter could
        # never be passed, so the fallback is hard-coded here.
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
                cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
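
            Example (a sketch; ``user_id`` stands for any pickle-able value)::

                response.set_cookie('visited', 'yes', max_age=3600, path='/')
                response.set_cookie('account', user_id, secret='change-me')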
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def setup(self, app):
app.config._define('json.enable', default=True, validate=bool,
help="Enable or disable automatic dict->json filter.")
app.config._define('json.ascii', default=False, validate=bool,
help="Use only 7-bit ASCII characters in output.")
app.config._define('json.indent', default=True, validate=bool,
help="Add whitespace to make json more readable.")
app.config._define('json.dump_func', default=None,
help="If defined, use this function to transform"
" dict into json. The other options no longer"
" apply.")
def apply(self, callback, route):
dumps = self.json_dumps
if not self.json_dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPResponse as resp:
rv = resp
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure.
                json_response = dumps(rv)
                # Set the content type only if serialization succeeded.
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
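
        Example of the append-on-assign behaviour::

            >>> d = MultiDict(a=1)
            >>> d['a'] = 2          # appends instead of overwriting
            >>> d['a']
            2
            >>> d.getall('a')
            [1, 2]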
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
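
            Example::

                >>> d = MultiDict(a='42')
                >>> d.get('a', type=int)
                42
                >>> d.get('missing', default=0, type=int)
                0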
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
_UNSET = object()
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, overlays and more.
This dict-like class is heavily optimized for read access. All read-only
methods as well as item access should be as fast as the built-in dict.
"""
__slots__ = ('_meta', '_change_listener', '_overlays', '_virtual_keys', '_source', '__weakref__')
def __init__(self):
self._meta = {}
self._change_listener = []
#: Weak references of overlays that need to be kept in sync.
self._overlays = []
#: Config that is the source for this overlay.
self._source = None
#: Keys of values copied from the source (values we do not own)
self._virtual_keys = set()
def load_module(self, path, squash=True):
"""Load values from a Python module.
           Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
           {'DEBUG': True, 'SQLITE.db': ':memory:'}
>>> c.load_module("config", False)
           {'DEBUG': True, 'SQLITE': {'db': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
            they are indented deeper than the first line of the value. Comments
are prefixed by ``#`` or ``;`` and may only appear on their own on
an otherwise empty line.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
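
            Example (a sketch; assumes an ``app.ini`` file)::

                [bottle]
                debug = true

                [sqlite]
                db = /tmp/test.db

            Loading it with ``config.load_config('app.ini')`` defines the
            keys ``debug`` and ``sqlite.db``.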
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
self._virtual_keys.discard(key)
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
if key in self._virtual_keys:
raise KeyError("Virtual keys cannot be deleted: %s" % key)
if self._source and key in self._source:
# Not virtual, but present in source -> Restore virtual value
dict.__delitem__(self, key)
self._set_virtual(key, self._source[key])
else: # not virtual, not present in source. This is OUR value
self._on_change(key, None)
dict.__delitem__(self, key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _set_virtual(self, key, value):
""" Recursively set or update virtual keys. Do nothing if non-virtual
value is present. """
if key in self and key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
self._virtual_keys.add(key)
if key in self and self[key] is not value:
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def _delete_virtual(self, key):
""" Recursively delete virtual entry. Do nothing if key is not virtual.
"""
if key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
if key in self:
self._on_change(key, None)
dict.__delitem__(self, key)
self._virtual_keys.discard(key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
def _define(self, key, default=_UNSET, help=_UNSET, validate=_UNSET):
""" (Unstable) Shortcut for plugins to define own config parameters. """
if default is not _UNSET:
self.setdefault(key, default)
if help is not _UNSET:
self.meta_set(key, 'help', help)
if validate is not _UNSET:
self.meta_set(key, 'validate', validate)
def _iter_overlays(self):
for ref in self._overlays:
overlay = ref()
if overlay is not None:
yield overlay
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
            Unlike collections.ChainMap or most other implementations, this
            approach does not resolve missing keys on demand, but instead
            actively copies all values from the source to the overlay and keeps
            track of virtual and non-virtual keys internally. This removes any
            lookup overhead. Read access is as fast as a built-in dict for both
            virtual and non-virtual keys.
            Changes are propagated recursively and depth-first. A failing
            on-change handler in an overlay stops the propagation of virtual
            values and may result in a partly updated tree. Take extra care
            here and make sure that on-change handlers never fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay
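# A minimal sketch of the ConfigDict overlay semantics described above (the
# keys and values are illustrative assumptions, not part of this module):
#
#   base = ConfigDict()
#   base['autojson'] = True
#   child = base._make_overlay()
#   child['autojson']           # -> True (virtual, copied from base)
#   child['autojson'] = False   # overwriting makes the key non-virtual
#   base['autojson'] = True     # no longer propagated to child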
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
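# Usage sketch for ResourceManager (hedged; the path and file name below are
# illustrative assumptions):
#
#   rm = ResourceManager()
#   rm.add_path('./static/')          # absolutized, trailing os.sep added
#   fname = rm.lookup('style.css')    # absolute file path or None
#   with rm.open('style.css') as fp:  # raises IOError if not found
#       data = fp.read()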
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
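    # Illustrative inputs and the sanitized results produced above (expected
    # values, shown as an informal sketch):
    #
    #   'résumé.pdf'           -> 'resume.pdf'      (accents stripped)
    #   '..\\..\\secret.txt'   -> 'secret.txt'      (path components removed)
    #   '  spaced  name.TXT'   -> 'spaced-name.TXT' (whitespace collapsed)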
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
        While checking user input is always a good idea, this function
        provides additional protection and prevents malicious ``filename``
        parameters from breaking out of the ``root`` directory and leaking
        sensitive information to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
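# Typical use of static_file inside a route handler (a sketch; the route path
# and root directory are assumptions):
#
#   @route('/static/<filepath:path>')
#   def serve_static(filepath):
#       return static_file(filepath, root='/var/www/static')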
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
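# Example (credentials are illustrative; 'dXNlcjpwYXNz' is the base64 encoding
# of 'user:pass'):
#
#   parse_auth('Basic dXNlcjpwYXNz')  # -> ('user', 'pass')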
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end:  # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
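# Example for a 1000-byte resource (illustrative values; end indices are
# non-inclusive):
#
#   list(parse_range_header('bytes=0-99,500-,-100', 1000))
#   # -> [(0, 100), (500, 1000), (900, 1000)]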
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
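# Example with a typical Accept header (illustrative):
#
#   _parse_http_header('text/html,text/plain;q=0.9,*/*;q=0.8')
#   # -> [('text/html', {}), ('text/plain', {'q': '0.9'}), ('*/*', {'q': '0.8'})]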
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
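# Round-trip sketch for the signed-cookie helpers (the key and payload are
# assumptions):
#
#   token = cookie_encode({'user': 'bob'}, key='secret')
#   cookie_is_encoded(token)            # -> True
#   cookie_decode(token, key='secret')  # -> {'user': 'bob'}
#   cookie_decode(token, key='wrong')   # -> None (signature mismatch)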
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
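# Expected escapes (illustrative):
#
#   html_escape('<b>"A&B"</b>')  # -> '&lt;b&gt;&quot;A&amp;B&quot;&lt;/b&gt;'
#   html_quote('two\nlines')     # -> '"two&#10;lines"'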
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
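# Examples (paths are illustrative):
#
#   path_shift('/a', '/b/c', shift=1)   # -> ('/a/b', '/c')
#   path_shift('/a/b', '/c', shift=-1)  # -> ('/a', '/b/c')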
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
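# Usage sketch (the check function and route below are assumptions):
#
#   def is_valid(user, password):
#       return user == 'admin' and password == 'hunter2'
#
#   @route('/admin')
#   @auth_basic(is_valid)
#   def admin_page():
#       return 'welcome'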
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port  # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
        default_app.remove(tmp)  # Remove the temporarily added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
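# Minimal run() sketch (the host, port and route are illustrative assumptions):
#
#   @route('/hello')
#   def hello():
#       return 'Hello World!'
#
#   run(host='localhost', port=8080, debug=True, reloader=True)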
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(BottleException):
pass
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.name)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
exec(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
    _re_tok = _re_inl = r'''((?mx)        # verbose and multi-line mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
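# template() usage sketch (template source, name and variables are
# illustrative):
#
#   template('Hello {{name}}!', name='World')  # inline template string
#   template('index', title='Home')            # finds index.tpl/.html/... on
#                                              # TEMPLATE_PATH
#   template('index', {'title': 'Home'})       # dict arguments work too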
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
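# view() usage sketch (template name and route are assumptions):
#
#   @route('/hello/<name>')
#   @view('hello_template')
#   def hello(name):
#       return dict(name=name)  # a dict fills the template; other results
#                               # pass through unchanged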
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
%%try:
%%exc = repr(e.exception)
%%except:
%%exc = '<unprintable %%s object>' %% type(e.exception).__name__
%%end
<pre>{{exc}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize the app stack (creating the first Bottle app is deferred until needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
def _main(argv): # pragma: no coverage
args, parser = _cli_parse(argv)
def _cli_error(cli_msg):
parser.print_help()
_stderr('\nError: %s\n' % cli_msg)
sys.exit(1)
if args.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args.app:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (args.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in args.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error as parse_error:
_cli_error(parse_error)
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError) as error:
_cli_error("Unable to parse config file %r: %s" % (cfile, error))
for cval in args.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args.app,
host=host,
port=int(port),
server=args.server,
reloader=args.reload,
plugins=args.plugin,
debug=args.debug,
config=config)
if __name__ == '__main__': # pragma: no coverage
_main(sys.argv)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2015, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to _main() is at the end of the file.
def _cli_parse(args): # pragma: no coverage
from argparse import ArgumentParser
    parser = ArgumentParser(usage="usage: %(prog)s [options] package.module:app")
opt = parser.add_argument
opt('app', help='WSGI app entry point.')
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
    cli_args = parser.parse_args(args[1:])  # skip argv[0] (the program name)
return cli_args, parser
def _cli_patch(cli_args): # pragma: no coverage
parsed_args, _ = _cli_parse(cli_args)
opts = parsed_args
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ##########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings, weakref, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
from json import dumps as json_dumps, loads as json_lds
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
py3k = sys.version_info.major > 2
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
exec(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
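# Usage sketch (illustrative; the class and loader function are hypothetical):
# the first access runs the decorated function, later accesses hit the plain
# instance attribute that replaced the property.
#
#     class Dataset(object):
#         @cached_property
#         def rows(self):
#             return expensive_load()  # computed once per instance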
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events #######################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
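# A quick sketch of the effect (illustrative): capturing groups turn into
# non-capturing ones, so flattened patterns can later be OR-ed together
# without shifting group indices.
#
#     _re_flatten(r'/(\d+)/(?P<name>\w+)')  # -> r'/(?:\d+)/(?:\w+)'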
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
        and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
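    # Usage sketch (illustrative; the filter name, pattern and app are made
    # up). A filter returns the (regexp, to_python, to_url) tuple described
    # above; passing None skips the corresponding conversion step:
    #
    #     def slug_filter(conf):
    #         regexp = r'[a-z0-9-]+'
    #         to_url = lambda value: value.lower().replace(' ', '-')
    #         return regexp, None, to_url
    #
    #     app = Bottle()
    #     app.router.add_filter('slug', slug_filter)
    #
    #     @app.route('/article/<title:slug>')
    #     def article(title):
    #         return 'Article: %s' % title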
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
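    # Usage sketch (illustrative; the route name is made up and `target` may
    # be any object, e.g. a callback). Leftover keyword arguments become the
    # query string:
    #
    #     router.add('/user/<id:int>', 'GET', target, name='user_detail')
    #     router.build('user_detail', id=42)          # -> '/user/42'
    #     router.build('user_detail', id=42, page=2)  # -> '/user/42?page=2'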
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = app.config._make_overlay()
self.config.load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
        depr(0, 13, "Route.get_config() is deprecated.",
"The Route.config property already includes values from the"
" application config for missing keys. Access it directly.")
return self.config.get(key, default)
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
@lazy_attribute
def _global_config(cls):
cfg = ConfigDict()
cfg.meta_set('catchall', 'validate', bool)
return cfg
def __init__(self, **kwargs):
#: A :class:`ConfigDict` for app specific configuration.
self.config = self._global_config._make_overlay()
self.config._add_change_listener(
functools.partial(self.trigger_hook, 'config'))
self.config.update({
"catchall": True
})
if kwargs.get('catchall') is False:
depr(0,13, "Bottle(catchall) keyword argument.",
"The 'catchall' setting is now part of the app "
"configuration. Fix: `app.config['catchall'] = False`")
self.config['catchall'] = False
if kwargs.get('autojson') is False:
depr(0, 13, "Bottle(autojson) keyword argument.",
"The 'autojson' setting is now part of the app "
"configuration. Fix: `app.config['json.enable'] = False`")
self.config['json.disable'] = True
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = {'after_request'}
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
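    # Usage sketch (illustrative): a 'before_request' hook that normalizes
    # the request path before routing happens.
    #
    #     @app.hook('before_request')
    #     def strip_path():
    #         request.environ['PATH_INFO'] = \
    #             request.environ['PATH_INFO'].rstrip('/')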
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
        (:class:`Bottle` children only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
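    # Usage sketch (illustrative):
    #
    #     parent = Bottle()
    #     api = Bottle()
    #
    #     @api.route('/ping')
    #     def ping():
    #         return 'pong'
    #
    #     parent.mount('/api/', api)  # GET /api/ping now answers 'pong'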
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
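    # Usage sketch (illustrative): the handler receives the HTTPError
    # instance and its return value is cast like any other route result.
    #
    #     @app.error(404)
    #     def not_found(err):
    #         return 'Nothing here, sorry.'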
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res, template_settings=dict(name='__ERROR_PAGE_TEMPLATE')))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
while True: # Remove in 0.14 together with RouteReset
out = None
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
out = route.call(**args)
break
except HTTPResponse as E:
out = E
break
except RouteReset:
depr(0, 13, "RouteReset exception deprecated",
"Call route.call() after route.reset() and "
"return the result.")
route.reset()
continue
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
try:
self.trigger_hook('after_request')
except HTTPResponse as E:
out = E
out.apply(response)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
environ['wsgi.errors'].flush()
out = HTTPError(500, "Internal Server Error", E, stacktrace)
out.apply(response)
return out
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
environ['wsgi.errors'].flush()
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
            should not be confused with the "URL wildcards" provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
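    # For reference (illustrative), a chunked body looks like this on the
    # wire; each chunk is '<hex size>[;ext]\r\n<data>\r\n' and a zero-size
    # chunk terminates the stream:
    #
    #     b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'  # decodes to b'Wikipedia'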
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
            HTTPError(413) on requests that are too large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`FileUpload` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
    def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(s):
return s.title().replace('_', '-')
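# Note that this _hkey performs no validation, so header names containing
# CR/LF pass straight through to the WSGI header list (CWE-93, HTTP response
# splitting). A hardened sketch, close to the validation later Bottle
# releases apply, would reject control characters in both names and values:
#
#     def _hkey(key):
#         if '\n' in key or '\r' in key or '\0' in key:
#             raise ValueError("Header names must not contain control characters: %r" % key)
#         return key.title().replace('_', '-')
#
#     def _hval(value):
#         value = tonat(value)
#         if '\n' in value or '\r' in value or '\0' in value:
#             raise ValueError("Header value must not contain control characters: %r" % value)
#         return value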
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: frozenset(('Content-Type', 'Content-Length')),
304: frozenset(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def setup(self, app):
app.config._define('json.enable', default=True, validate=bool,
help="Enable or disable automatic dict->json filter.")
app.config._define('json.ascii', default=False, validate=bool,
help="Use only 7-bit ASCII characters in output.")
app.config._define('json.indent', default=True, validate=bool,
help="Add whitespace to make json more readable.")
app.config._define('json.dump_func', default=None,
help="If defined, use this function to transform"
" dict into json. The other options no longer"
" apply.")
def apply(self, callback, route):
dumps = self.json_dumps
if not self.json_dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPResponse as resp:
rv = resp
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
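# Usage sketch (illustrative, not part of the original module): with the
# JSONPlugin installed, a route that returns a dict is serialized
# automatically and the Content-Type header is set to application/json.
#
#     @route('/api/user')
#     def user():
#         return {'name': 'alice', 'admin': False}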
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
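# Usage sketch (illustrative, not part of the original module): item access
# returns the newest value, while getall() exposes the full list.
#
#     md = MultiDict(a=1)
#     md['a'] = 2            # appends, does not replace
#     md['a']                # -> 2 (newest value)
#     md.getall('a')         # -> [1, 2]
#     md.get('a', type=str)  # -> '2' (cast via the `type` callable)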
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
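# Usage sketch (illustrative, not part of the original module): attribute
# access is a forgiving, unicode-aware alternative to item access.
#
#     request.forms['name']   # native string; raises KeyError if missing
#     request.forms.name      # unicode string; '' if missing or undecodable
#     request.forms.getunicode('name', default='anonymous')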
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
_UNSET = object()
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, overlays and more.
This dict-like class is heavily optimized for read access. All read-only
methods as well as item access should be as fast as the built-in dict.
"""
__slots__ = ('_meta', '_change_listener', '_overlays', '_virtual_keys', '_source', '__weakref__')
def __init__(self):
self._meta = {}
self._change_listener = []
#: Weak references of overlays that need to be kept in sync.
self._overlays = []
#: Config that is the source for this overlay.
self._source = None
#: Keys of values copied from the source (values we do not own)
self._virtual_keys = set()
def load_module(self, path, squash=True):
"""Load values from a Python module.
           Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
           {'DEBUG': True, 'SQLITE.db': ':memory:'}
           >>> c.load_module("config", False)
           {'DEBUG': True, 'SQLITE': {'db': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
            they are indented deeper than the first line of the value. Comments
are prefixed by ``#`` or ``;`` and may only appear on their own on
an otherwise empty line.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
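    # Illustrative example (not part of the original module): given this
    # ini file,
    #
    #     [bottle]
    #     debug = True
    #     [sqlite]
    #     db = :memory:
    #
    # load_config() produces the keys 'debug' (root namespace, via the
    # special [bottle] section) and 'sqlite.db'.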
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
self._virtual_keys.discard(key)
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
if key in self._virtual_keys:
raise KeyError("Virtual keys cannot be deleted: %s" % key)
if self._source and key in self._source:
# Not virtual, but present in source -> Restore virtual value
dict.__delitem__(self, key)
self._set_virtual(key, self._source[key])
else: # not virtual, not present in source. This is OUR value
self._on_change(key, None)
dict.__delitem__(self, key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _set_virtual(self, key, value):
""" Recursively set or update virtual keys. Do nothing if non-virtual
value is present. """
if key in self and key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
self._virtual_keys.add(key)
if key in self and self[key] is not value:
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def _delete_virtual(self, key):
""" Recursively delete virtual entry. Do nothing if key is not virtual.
"""
if key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
if key in self:
self._on_change(key, None)
dict.__delitem__(self, key)
self._virtual_keys.discard(key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
def _define(self, key, default=_UNSET, help=_UNSET, validate=_UNSET):
""" (Unstable) Shortcut for plugins to define own config parameters. """
if default is not _UNSET:
self.setdefault(key, default)
if help is not _UNSET:
self.meta_set(key, 'help', help)
if validate is not _UNSET:
self.meta_set(key, 'validate', validate)
def _iter_overlays(self):
for ref in self._overlays:
overlay = ref()
if overlay is not None:
yield overlay
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
            Unlike collections.ChainMap or most other implementations, this
            approach does not resolve missing keys on demand, but instead
            actively copies all values from the source to the overlay and keeps
            track of virtual and non-virtual keys internally. This removes any
            lookup overhead. Read access is as fast as a built-in dict for both
            virtual and non-virtual keys.
            Changes are propagated recursively and depth-first. A failing
            on-change handler in an overlay stops the propagation of virtual
            values and may result in a partly updated tree. Take extra care
            here and make sure that on-change handlers never fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay
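# Usage sketch (illustrative, not part of the original module): source values
# show through an overlay as 'virtual' keys until the overlay overrides them.
#
#     base = ConfigDict()
#     base['timeout'] = 10
#     child = base._make_overlay()
#     child['timeout']        # -> 10 (virtual, copied from the source)
#     child['timeout'] = 5    # now non-virtual; base['timeout'] is still 10
#     base['timeout'] = 20    # no longer propagates to the overridden key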
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
               to last index (appends to the list).
            :param create: If True, the directory is created if it does not
               exist. (default: False)
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
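# Usage sketch (illustrative, not part of the original module): looking up
# files shipped alongside the current module. 'defaults.json' is a
# hypothetical resource name.
#
#     res = ResourceManager()
#     res.add_path('./data/', base=__file__)
#     with res.open('defaults.json') as fp:
#         payload = fp.read()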
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
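    # Illustrative example (not part of the original module) of the
    # normalization above: accents are stripped, unsafe characters removed,
    # and whitespace collapsed to a single dash.
    #
    #     raw_filename: u'R\xe9sum\xe9 (final).pdf'
    #     filename:     'Resume-final.pdf'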
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
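# Usage sketch (illustrative, not part of the original module): serving files
# from a dedicated directory. '/path/to/static' is a placeholder.
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='/path/to/static')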
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
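# Illustrative example (not part of the original module), for a 1000 byte
# resource:
#
#     list(parse_range_header('bytes=0-99,-10', 1000))
#     # -> [(0, 100), (990, 1000)]  (end index is non-inclusive)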
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
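# Illustrative example (not part of the original module):
#
#     _parse_http_header('text/html,text/plain;q=0.9')
#     # -> [('text/html', {}), ('text/plain', {'q': '0.9'})]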
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
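# Usage sketch (illustrative, not part of the original module): a sign/verify
# round trip. Tampering with the payload or using the wrong key makes
# cookie_decode() return None.
#
#     blob = cookie_encode(('session', {'uid': 7}), 'mysecret')
#     cookie_decode(blob, 'mysecret')   # -> ('session', {'uid': 7})
#     cookie_decode(blob, 'wrongkey')   # -> None (signature mismatch)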
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
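# Illustrative examples (not part of the original module):
#
#     path_shift('/a', '/b/c', shift=1)    # -> ('/a/b', '/c')
#     path_shift('/a/b', '/c', shift=-1)   # -> ('/a', '/b/c')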
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
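# Usage sketch (illustrative, not part of the original module): the check
# callback receives the credentials and returns True to grant access.
# 'check_credentials' is a hypothetical helper.
#
#     def check_credentials(user, password):
#         return user == 'admin' and password == 'hunter2'
#
#     @route('/admin')
#     @auth_basic(check_credentials)
#     def admin_panel():
#         return 'secret'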
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port  # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
        default_app.remove(tmp)  # Remove the temporarily added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
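# Usage sketch (illustrative, not part of the original module): the typical
# development entry point, with debug mode and the auto-reloader enabled.
#
#     if __name__ == '__main__':
#         run(host='localhost', port=8080, debug=True, reloader=True)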
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(BottleException):
pass
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
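# Minimal adapter sketch (illustrative, not part of bottle): a custom engine
# only needs to implement prepare() and render() on top of BaseTemplate.
#
#     class EchoTemplate(BaseTemplate):
#         def prepare(self, **options):
#             # __init__ guarantees self.source and/or self.filename is set.
#             if not self.source:
#                 with open(self.filename, 'rb') as f:
#                     self.source = f.read().decode(self.encoding)
#         def render(self, *args, **kwargs):
#             return self.source  # a real engine would substitute variables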
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.name)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
exec(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
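# Usage sketch (illustrative): rendering an stpl source string directly.
#
#     tpl = SimpleTemplate(source='Hello {{name}}!')
#     tpl.render(name='<World>')     # -> 'Hello &lt;World&gt;!' (escaped)
#     tpl.render({'name': 'World'})  # dict arguments work too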
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''((?mx) # verbose and dot-matches-newline mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
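    # Example (added note): with the default syntax '<% %> % {{ }}', a line
    # starting with '%' is a single code line, '<% ... %>' delimits a code
    # block, and '{{expr}}' is an escaped inline expression ('{{!expr}}'
    # skips escaping).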
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
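    # e.g. (added note): process_inline('name')  -> '_escape(name)'
    #                    process_inline('!name') -> '_str(name)'  # no escaping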
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
    Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
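# Usage sketch (illustrative; the template names are hypothetical):
#
#     template('Hello {{name}}!', name='World')    # inline source (has '{')
#     template('index', title='Home')              # looked up as index.tpl etc.
#     jinja2_template('index.html', title='Home')  # same lookup, via Jinja2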
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
    The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
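# Usage sketch (illustrative; route and template name are hypothetical):
#
#     @route('/hello/<name>')
#     @view('hello_template')
#     def hello(name):
#         return dict(name=name)  # filled into hello_template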
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
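# e.g. HTTP_CODES[404] == 'Not Found', _HTTP_STATUS_LINES[404] == '404 Not Found'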
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
%%try:
%%exc = repr(e.exception)
%%except:
%%exc = '<unprintable %%s object>' %% type(e.exception).__name__
%%end
<pre>{{exc}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app now deferred until needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
def _main(argv): # pragma: no coverage
args, parser = _cli_parse(argv)
def _cli_error(cli_msg):
parser.print_help()
_stderr('\nError: %s\n' % cli_msg)
sys.exit(1)
if args.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args.app:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (args.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
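    # Examples (added note): 'example.com:8000' -> ('example.com', '8000');
    # '[::1]:8080' -> ('::1', '8080'); a bare '[::1]' keeps the default port.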
config = ConfigDict()
for cfile in args.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error as parse_error:
_cli_error(parse_error)
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError) as error:
_cli_error("Unable to parse config file %r: %s" % (cfile, error))
for cval in args.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args.app,
host=host,
port=int(port),
server=args.server,
reloader=args.reload,
plugins=args.plugin,
debug=args.debug,
config=config)
if __name__ == '__main__': # pragma: no coverage
_main(sys.argv)
| ./CrossVul/dataset_final_sorted/CWE-93/py/bad_5519_0 |
crossvul-python_data_bad_1501_0 | # -*- coding: utf-8 -*-
'''
Execute chef in server or solo mode
'''
# Import Python libs
import logging
import os
import tempfile
# Import Salt libs
import salt.utils
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if chef is installed
'''
if not salt.utils.which('chef-client'):
return False
return True
def _default_logfile(exe_name):
'''
Retrieve the logfile name
'''
if salt.utils.is_windows():
logfile_tmp = tempfile.NamedTemporaryFile(dir=os.environ['TMP'],
prefix=exe_name,
suffix='.log',
delete=False)
logfile = logfile_tmp.name
logfile_tmp.close()
else:
logfile = salt.utils.path_join(
'/var/log',
'{0}.log'.format(exe_name)
)
return logfile
@decorators.which('chef-client')
def client(whyrun=False,
localmode=False,
logfile=_default_logfile('chef-client'),
**kwargs):
'''
Execute a chef client run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.client server=https://localhost
server
The chef server URL
client_key
Set the client key file location
config
The configuration file to use
config-file-jail
Directory under which config files are allowed to be loaded
(no client.rb or knife.rb outside this path will be loaded).
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
localmode
Point chef-client at local repository if True
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
pid
Set the PID file location, defaults to /tmp/chef-client.pid
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
runlist
Permanently replace current run list with specified items
user
User to set privilege to
validation_key
Set the validation key file location, used for registering new clients
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-client',
'--no-color',
'--once',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
if localmode:
args.append('--local-mode')
return _exec_cmd(*args, **kwargs)
@decorators.which('chef-solo')
def solo(whyrun=False,
logfile=_default_logfile('chef-solo'),
**kwargs):
'''
Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.solo override-runlist=test
config
The configuration file to use
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
recipe-url
Pull down a remote gzipped tarball of recipes and untar it to
the cookbook cache
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
user
User to set privilege to
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-solo',
'--no-color',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
return _exec_cmd(*args, **kwargs)
def _exec_cmd(*args, **kwargs):
# Compile the command arguments
cmd_args = ' '.join(args)
cmd_kwargs = ''.join([
' --{0} {1}'.format(k, v)
for k, v in kwargs.items() if not k.startswith('__')]
)
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
log.debug('Chef command: {0}'.format(cmd_exec))
return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
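# Example (added note): keyword arguments become long options, so
#   _exec_cmd('chef-client', '--no-color', **{'node-name': 'web1'})
# builds and runs: chef-client --no-color --node-name web1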
| ./CrossVul/dataset_final_sorted/CWE-19/py/bad_1501_0 |
crossvul-python_data_bad_1500_0 | # -*- coding: utf-8 -*-
'''
Wrapper around Server Density API
=================================
.. versionadded:: 2014.7.0
'''
import requests
import json
import logging
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'):
'''
Returns requested Server Density authentication value from pillar.
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.get_sd_auth <val>
'''
sd_pillar = __pillar__.get(sd_auth_pillar_name)
log.debug('Server Density Pillar: {0}'.format(sd_pillar))
if not sd_pillar:
        log.error('Could not load {0} pillar'.format(sd_auth_pillar_name))
raise CommandExecutionError(
'{0} pillar is required for authentication'.format(sd_auth_pillar_name)
)
try:
return sd_pillar[val]
except KeyError:
        log.error('Could not find value {0} in pillar'.format(val))
raise CommandExecutionError('{0} value was not found in pillar'.format(val))
def _clean_salt_variables(params, variable_prefix="__"):
'''
Pops out variables from params which starts with `variable_prefix`.
'''
map(params.pop, [k for k in params if k.startswith(variable_prefix)])
return params
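# Example (added note): _clean_salt_variables({'__pub_fun': 'x', 'name': 'lama'})
# -> {'name': 'lama'}. The eager map() relies on Python 2 semantics; on
# Python 3, map() is lazy and the pops would never execute.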
def create(name, **params):
'''
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
'''
log.debug('Server Density params: {0}'.format(params))
params = _clean_salt_variables(params)
params['name'] = name
api_response = requests.post(
'https://api.serverdensity.io/inventory/devices/',
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: {0}'.format(api_response.content))
raise CommandExecutionError(
'Failed to create, API Response: {0}'.format(api_response)
)
else:
return None
def delete(device_id):
'''
Delete a device from Server Density. For more information, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Deleting
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.delete 51f7eafcdba4bb235e000ae4
'''
api_response = requests.delete(
'https://api.serverdensity.io/inventory/devices/' + device_id,
params={'token': get_sd_auth('api_token')}
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: {0}'.format(api_response.content))
raise CommandExecutionError(
                'Failed to delete, API Response: {0}'.format(api_response)
)
else:
return None
def ls(**params):
'''
List devices in Server Density
Results will be filtered by any params passed to this function. For more
information, see the API docs on listing_ and searching_.
.. _listing: https://apidocs.serverdensity.com/Inventory/Devices/Listing
.. _searching: https://apidocs.serverdensity.com/Inventory/Devices/Searching
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.ls
salt '*' serverdensity_device.ls name=lama
salt '*' serverdensity_device.ls name=lama group=lama_band installedRAM=32768
'''
params = _clean_salt_variables(params)
endpoint = 'devices'
# Change endpoint if there are params to filter by:
if params:
endpoint = 'resources'
# Convert all ints to strings:
for k, v in params.items():
params[k] = str(v)
api_response = requests.get(
'https://api.serverdensity.io/inventory/{0}'.format(endpoint),
params={'token': get_sd_auth('api_token'), 'filter': json.dumps(params)}
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error(
'Could not parse Server Density API Response content: {0}'
.format(api_response.content)
)
raise CommandExecutionError(
                'Failed to list, Server Density API Response: {0}'
.format(api_response)
)
else:
return None
def update(device_id, **params):
'''
Updates device information in Server Density. For more information see the
`API docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Updating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=lama group=lama_band
salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=better_lama group=rock_lamas swapSpace=512
'''
params = _clean_salt_variables(params)
api_response = requests.put(
'https://api.serverdensity.io/inventory/devices/' + device_id,
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error(
'Could not parse Server Density API Response content: {0}'
.format(api_response.content)
)
raise CommandExecutionError(
                'Failed to update, API Response: {0}'.format(api_response)
)
else:
return None
def install_agent(agent_key):
'''
    Downloads the Server Density installation agent and installs sd-agent
    using the given agent_key.
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498
'''
work_dir = '/tmp/'
account_url = get_sd_auth('account_url')
__salt__['cmd.run'](
cmd='curl https://www.serverdensity.com/downloads/agent-install.sh -o install.sh',
cwd=work_dir
)
__salt__['cmd.run'](cmd='chmod +x install.sh', cwd=work_dir)
return __salt__['cmd.run'](
cmd='./install.sh -a {account_url} -k {agent_key}'.format(
account_url=account_url, agent_key=agent_key),
cwd=work_dir
)
| ./CrossVul/dataset_final_sorted/CWE-19/py/bad_1500_0 |
crossvul-python_data_bad_1502_0 | # -*- coding: utf-8 -*-
'''
Execute chef in server or solo mode
'''
# Import Python libs
import logging
import os
# Import Salt libs
import salt.utils
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if chef is installed
'''
if not salt.utils.which('chef-client'):
return False
return True
def _default_logfile(exe_name):
if salt.utils.is_windows():
logfile = salt.utils.path_join(
os.environ['TMP'],
'{0}.log'.format(exe_name)
)
else:
logfile = salt.utils.path_join(
'/var/log',
'{0}.log'.format(exe_name)
)
return logfile
@decorators.which('chef-client')
def client(whyrun=False,
localmode=False,
logfile=_default_logfile('chef-client'),
**kwargs):
'''
Execute a chef client run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.client server=https://localhost
server
The chef server URL
client_key
Set the client key file location
config
The configuration file to use
config-file-jail
Directory under which config files are allowed to be loaded
(no client.rb or knife.rb outside this path will be loaded).
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
localmode
Point chef-client at local repository if True
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
pid
Set the PID file location, defaults to /tmp/chef-client.pid
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
runlist
Permanently replace current run list with specified items
user
User to set privilege to
validation_key
Set the validation key file location, used for registering new clients
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-client',
'--no-color',
'--once',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
if localmode:
args.append('--local-mode')
return _exec_cmd(*args, **kwargs)
@decorators.which('chef-solo')
def solo(whyrun=False,
logfile=_default_logfile('chef-solo'),
**kwargs):
'''
Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.solo override-runlist=test
config
The configuration file to use
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
recipe-url
Pull down a remote gzipped tarball of recipes and untar it to
the cookbook cache
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
user
User to set privilege to
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-solo',
'--no-color',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
return _exec_cmd(*args, **kwargs)
def _exec_cmd(*args, **kwargs):
# Compile the command arguments
cmd_args = ' '.join(args)
cmd_kwargs = ''.join([
' --{0} {1}'.format(k, v)
for k, v in kwargs.items() if not k.startswith('__')]
)
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
log.debug('Chef command: {0}'.format(cmd_exec))
return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
| ./CrossVul/dataset_final_sorted/CWE-19/py/bad_1502_0 |
crossvul-python_data_good_1502_0 | # -*- coding: utf-8 -*-
'''
Execute chef in server or solo mode
'''
# Import Python libs
import logging
import os
import tempfile
# Import Salt libs
import salt.utils
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if chef is installed
'''
if not salt.utils.which('chef-client'):
return False
return True
def _default_logfile(exe_name):
'''
Retrieve the logfile name
'''
if salt.utils.is_windows():
logfile_tmp = tempfile.NamedTemporaryFile(dir=os.environ['TMP'],
prefix=exe_name,
suffix='.log',
delete=False)
logfile = logfile_tmp.name
logfile_tmp.close()
else:
logfile = salt.utils.path_join(
'/var/log',
'{0}.log'.format(exe_name)
)
return logfile
@decorators.which('chef-client')
def client(whyrun=False,
localmode=False,
logfile=_default_logfile('chef-client'),
**kwargs):
'''
Execute a chef client run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.client server=https://localhost
server
The chef server URL
client_key
Set the client key file location
config
The configuration file to use
config-file-jail
Directory under which config files are allowed to be loaded
(no client.rb or knife.rb outside this path will be loaded).
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
localmode
Point chef-client at local repository if True
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
pid
Set the PID file location, defaults to /tmp/chef-client.pid
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
runlist
Permanently replace current run list with specified items
user
User to set privilege to
validation_key
Set the validation key file location, used for registering new clients
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-client',
'--no-color',
'--once',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
if localmode:
args.append('--local-mode')
return _exec_cmd(*args, **kwargs)
@decorators.which('chef-solo')
def solo(whyrun=False,
logfile=_default_logfile('chef-solo'),
**kwargs):
'''
Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.solo override-runlist=test
config
The configuration file to use
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
recipe-url
Pull down a remote gzipped tarball of recipes and untar it to
the cookbook cache
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
user
User to set privilege to
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-solo',
'--no-color',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
return _exec_cmd(*args, **kwargs)
def _exec_cmd(*args, **kwargs):
# Compile the command arguments
cmd_args = ' '.join(args)
cmd_kwargs = ''.join([
' --{0} {1}'.format(k, v)
for k, v in kwargs.items() if not k.startswith('__')]
)
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
log.debug('Chef command: {0}'.format(cmd_exec))
return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
| ./CrossVul/dataset_final_sorted/CWE-19/py/good_1502_0 |
crossvul-python_data_good_1500_0 | # -*- coding: utf-8 -*-
'''
Wrapper around Server Density API
=================================
.. versionadded:: 2014.7.0
'''
import requests
import json
import logging
import os
import tempfile
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'):
'''
Returns requested Server Density authentication value from pillar.
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.get_sd_auth <val>
'''
sd_pillar = __pillar__.get(sd_auth_pillar_name)
log.debug('Server Density Pillar: {0}'.format(sd_pillar))
if not sd_pillar:
        log.error('Could not load {0} pillar'.format(sd_auth_pillar_name))
raise CommandExecutionError(
'{0} pillar is required for authentication'.format(sd_auth_pillar_name)
)
try:
return sd_pillar[val]
except KeyError:
        log.error('Could not find value {0} in pillar'.format(val))
raise CommandExecutionError('{0} value was not found in pillar'.format(val))
def _clean_salt_variables(params, variable_prefix="__"):
'''
Pops out variables from params which starts with `variable_prefix`.
'''
map(params.pop, [k for k in params if k.startswith(variable_prefix)])
return params
def create(name, **params):
'''
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
'''
log.debug('Server Density params: {0}'.format(params))
params = _clean_salt_variables(params)
params['name'] = name
api_response = requests.post(
'https://api.serverdensity.io/inventory/devices/',
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: {0}'.format(api_response.content))
raise CommandExecutionError(
'Failed to create, API Response: {0}'.format(api_response)
)
else:
return None
def delete(device_id):
'''
Delete a device from Server Density. For more information, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Deleting
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.delete 51f7eafcdba4bb235e000ae4
'''
api_response = requests.delete(
'https://api.serverdensity.io/inventory/devices/' + device_id,
params={'token': get_sd_auth('api_token')}
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: {0}'.format(api_response.content))
raise CommandExecutionError(
                'Failed to delete, API Response: {0}'.format(api_response)
)
else:
return None
def ls(**params):
'''
List devices in Server Density
Results will be filtered by any params passed to this function. For more
information, see the API docs on listing_ and searching_.
.. _listing: https://apidocs.serverdensity.com/Inventory/Devices/Listing
.. _searching: https://apidocs.serverdensity.com/Inventory/Devices/Searching
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.ls
salt '*' serverdensity_device.ls name=lama
salt '*' serverdensity_device.ls name=lama group=lama_band installedRAM=32768
'''
params = _clean_salt_variables(params)
endpoint = 'devices'
# Change endpoint if there are params to filter by:
if params:
endpoint = 'resources'
# Convert all ints to strings:
for k, v in params.items():
params[k] = str(v)
api_response = requests.get(
'https://api.serverdensity.io/inventory/{0}'.format(endpoint),
params={'token': get_sd_auth('api_token'), 'filter': json.dumps(params)}
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error(
'Could not parse Server Density API Response content: {0}'
.format(api_response.content)
)
raise CommandExecutionError(
                'Failed to list, Server Density API Response: {0}'
.format(api_response)
)
else:
return None
def update(device_id, **params):
'''
Updates device information in Server Density. For more information see the
`API docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Updating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=lama group=lama_band
salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=better_lama group=rock_lamas swapSpace=512
'''
params = _clean_salt_variables(params)
api_response = requests.put(
'https://api.serverdensity.io/inventory/devices/' + device_id,
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error(
'Could not parse Server Density API Response content: {0}'
.format(api_response.content)
)
raise CommandExecutionError(
                'Failed to update, API Response: {0}'.format(api_response)
)
else:
return None
def install_agent(agent_key):
'''
    Downloads the Server Density installation agent and installs sd-agent
    using the given agent_key.
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498
'''
work_dir = os.path.join(__opts__['cachedir'], 'tmp')
if not os.path.isdir(work_dir):
os.mkdir(work_dir)
install_file = tempfile.NamedTemporaryFile(dir=work_dir,
suffix='.sh',
delete=False)
install_filename = install_file.name
install_file.close()
account_url = get_sd_auth('account_url')
__salt__['cmd.run'](
cmd='curl https://www.serverdensity.com/downloads/agent-install.sh -o {0}'.format(install_filename),
cwd=work_dir
)
__salt__['cmd.run'](cmd='chmod +x {0}'.format(install_filename), cwd=work_dir)
return __salt__['cmd.run'](
        cmd='{filename} -a {account_url} -k {agent_key}'.format(
            filename=install_filename, account_url=account_url, agent_key=agent_key),
cwd=work_dir
)
| ./CrossVul/dataset_final_sorted/CWE-19/py/good_1500_0 |
crossvul-python_data_good_1501_0 | # -*- coding: utf-8 -*-
'''
Execute chef in server or solo mode
'''
# Import Python libs
import logging
import os
import tempfile
# Import Salt libs
import salt.utils
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if chef is installed
'''
if not salt.utils.which('chef-client'):
return False
return True
def _default_logfile(exe_name):
'''
Retrieve the logfile name
'''
if salt.utils.is_windows():
tmp_dir = os.path.join(__opts__['cachedir'], 'tmp')
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
logfile_tmp = tempfile.NamedTemporaryFile(dir=tmp_dir,
prefix=exe_name,
suffix='.log',
delete=False)
logfile = logfile_tmp.name
logfile_tmp.close()
else:
logfile = salt.utils.path_join(
'/var/log',
'{0}.log'.format(exe_name)
)
return logfile
@decorators.which('chef-client')
def client(whyrun=False,
localmode=False,
logfile=_default_logfile('chef-client'),
**kwargs):
'''
Execute a chef client run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.client server=https://localhost
server
The chef server URL
client_key
Set the client key file location
config
The configuration file to use
config-file-jail
Directory under which config files are allowed to be loaded
(no client.rb or knife.rb outside this path will be loaded).
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
localmode
Point chef-client at local repository if True
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
pid
Set the PID file location, defaults to /tmp/chef-client.pid
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
runlist
Permanently replace current run list with specified items
user
User to set privilege to
validation_key
Set the validation key file location, used for registering new clients
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-client',
'--no-color',
'--once',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
if localmode:
args.append('--local-mode')
return _exec_cmd(*args, **kwargs)
@decorators.which('chef-solo')
def solo(whyrun=False,
logfile=_default_logfile('chef-solo'),
**kwargs):
'''
Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.solo override-runlist=test
config
The configuration file to use
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
recipe-url
Pull down a remote gzipped tarball of recipes and untar it to
the cookbook cache
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
user
User to set privilege to
whyrun
Enable whyrun mode when set to True
'''
args = ['chef-solo',
'--no-color',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
return _exec_cmd(*args, **kwargs)
def _exec_cmd(*args, **kwargs):
# Compile the command arguments
cmd_args = ' '.join(args)
cmd_kwargs = ''.join([
' --{0} {1}'.format(k, v)
for k, v in kwargs.items() if not k.startswith('__')]
)
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
log.debug('Chef command: {0}'.format(cmd_exec))
return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
| ./CrossVul/dataset_final_sorted/CWE-19/py/good_1501_0 |
crossvul-python_data_good_4113_0 | import discord
from redbot.core.bot import Red
from redbot.core import checks, commands, Config
from redbot.core.i18n import cog_i18n, Translator
from redbot.core.utils._internal_utils import send_to_owners_with_prefix_replaced
from redbot.core.utils.chat_formatting import escape, pagify
from .streamtypes import (
HitboxStream,
PicartoStream,
Stream,
TwitchStream,
YoutubeStream,
)
from .errors import (
APIError,
InvalidTwitchCredentials,
InvalidYoutubeCredentials,
OfflineStream,
StreamNotFound,
StreamsError,
)
from . import streamtypes as _streamtypes
import re
import logging
import asyncio
import aiohttp
import contextlib
from datetime import datetime
from collections import defaultdict
from typing import Optional, List, Tuple, Union, Dict
_ = Translator("Streams", __file__)
log = logging.getLogger("red.core.cogs.Streams")
@cog_i18n(_)
class Streams(commands.Cog):
"""Various commands relating to streaming platforms.
You can check if a Twitch, YouTube or Picarto stream is
currently live.
"""
global_defaults = {
"refresh_timer": 300,
"tokens": {},
"streams": [],
"notified_owner_missing_twitch_secret": False,
}
guild_defaults = {
"autodelete": False,
"mention_everyone": False,
"mention_here": False,
"live_message_mention": False,
"live_message_nomention": False,
"ignore_reruns": False,
}
role_defaults = {"mention": False}
def __init__(self, bot: Red):
super().__init__()
self.config: Config = Config.get_conf(self, 26262626)
self.ttv_bearer_cache: dict = {}
self.config.register_global(**self.global_defaults)
self.config.register_guild(**self.guild_defaults)
self.config.register_role(**self.role_defaults)
self.bot: Red = bot
self.streams: List[Stream] = []
self.task: Optional[asyncio.Task] = None
self.yt_cid_pattern = re.compile("^UC[-_A-Za-z0-9]{21}[AQgw]$")
self._ready_event: asyncio.Event = asyncio.Event()
self._init_task: asyncio.Task = self.bot.loop.create_task(self.initialize())
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
def check_name_or_id(self, data: str) -> bool:
matched = self.yt_cid_pattern.fullmatch(data)
if matched is None:
return True
return False
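    # Added note: the pattern above matches 24-character YouTube channel IDs
    # ('UC' + 21 chars of [-_A-Za-z0-9] + one of 'AQgw'); anything else is
    # treated as a channel *name*, so this returns True for names.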
async def initialize(self) -> None:
"""Should be called straight after cog instantiation."""
await self.bot.wait_until_ready()
try:
await self.move_api_keys()
await self.get_twitch_bearer_token()
self.streams = await self.load_streams()
self.task = self.bot.loop.create_task(self._stream_alerts())
except Exception as error:
log.exception("Failed to initialize Streams cog:", exc_info=error)
self._ready_event.set()
@commands.Cog.listener()
async def on_red_api_tokens_update(self, service_name, api_tokens):
if service_name == "twitch":
await self.get_twitch_bearer_token(api_tokens)
async def cog_before_invoke(self, ctx: commands.Context):
await self._ready_event.wait()
async def move_api_keys(self) -> None:
"""Move the API keys from cog stored config to core bot config if they exist."""
tokens = await self.config.tokens()
youtube = await self.bot.get_shared_api_tokens("youtube")
twitch = await self.bot.get_shared_api_tokens("twitch")
for token_type, token in tokens.items():
if token_type == "YoutubeStream" and "api_key" not in youtube:
await self.bot.set_shared_api_tokens("youtube", api_key=token)
if token_type == "TwitchStream" and "client_id" not in twitch:
# Don't need to check Community since they're set the same
await self.bot.set_shared_api_tokens("twitch", client_id=token)
await self.config.tokens.clear()
async def get_twitch_bearer_token(self, api_tokens: Optional[Dict] = None) -> None:
tokens = (
await self.bot.get_shared_api_tokens("twitch") if api_tokens is None else api_tokens
)
if tokens.get("client_id"):
notified_owner_missing_twitch_secret = (
await self.config.notified_owner_missing_twitch_secret()
)
try:
tokens["client_secret"]
if notified_owner_missing_twitch_secret is True:
await self.config.notified_owner_missing_twitch_secret.set(False)
except KeyError:
message = _(
"You need a client secret key if you want to use the Twitch API on this cog.\n"
"Follow these steps:\n"
"1. Go to this page: https://dev.twitch.tv/console/apps.\n"
'2. Click "Manage" on your application.\n'
'3. Click on "New secret".\n'
"5. Copy your client ID and your client secret into:\n"
"{command}"
"\n\n"
"Note: These tokens are sensitive and should only be used in a private channel "
"or in DM with the bot."
).format(
command="`[p]set api twitch client_id {} client_secret {}`".format(
_("<your_client_id_here>"), _("<your_client_secret_here>")
)
)
if notified_owner_missing_twitch_secret is False:
await send_to_owners_with_prefix_replaced(self.bot, message)
await self.config.notified_owner_missing_twitch_secret.set(True)
async with aiohttp.ClientSession() as session:
async with session.post(
"https://id.twitch.tv/oauth2/token",
params={
"client_id": tokens.get("client_id", ""),
"client_secret": tokens.get("client_secret", ""),
"grant_type": "client_credentials",
},
) as req:
try:
data = await req.json()
except aiohttp.ContentTypeError:
data = {}
if req.status == 200:
pass
elif req.status == 400 and data.get("message") == "invalid client":
log.error(
"Twitch API request failed authentication: set Client ID is invalid."
)
elif req.status == 403 and data.get("message") == "invalid client secret":
log.error(
"Twitch API request failed authentication: set Client Secret is invalid."
)
elif "message" in data:
log.error(
"Twitch OAuth2 API request failed with status code %s"
" and error message: %s",
req.status,
data["message"],
)
else:
log.error("Twitch OAuth2 API request failed with status code %s", req.status)
if req.status != 200:
return
self.ttv_bearer_cache = data
self.ttv_bearer_cache["expires_at"] = datetime.now().timestamp() + data.get("expires_in")
async def maybe_renew_twitch_bearer_token(self) -> None:
if self.ttv_bearer_cache:
if self.ttv_bearer_cache["expires_at"] - datetime.now().timestamp() <= 60:
await self.get_twitch_bearer_token()
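    # Added note: the cached token comes from Twitch's OAuth2
    # client-credentials flow (an app access token with no user context); the
    # 60-second margin above refreshes it shortly before expiry.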
@commands.command()
async def twitchstream(self, ctx: commands.Context, channel_name: str):
"""Check if a Twitch channel is live."""
await self.maybe_renew_twitch_bearer_token()
token = (await self.bot.get_shared_api_tokens("twitch")).get("client_id")
stream = TwitchStream(
name=channel_name, token=token, bearer=self.ttv_bearer_cache.get("access_token", None),
)
await self.check_online(ctx, stream)
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.guild)
async def youtubestream(self, ctx: commands.Context, channel_id_or_name: str):
"""Check if a YouTube channel is live."""
# TODO: Write up a custom check to look up cooldown set by botowner
# This check is here to avoid people spamming this command and eating up quota
apikey = await self.bot.get_shared_api_tokens("youtube")
is_name = self.check_name_or_id(channel_id_or_name)
if is_name:
stream = YoutubeStream(name=channel_id_or_name, token=apikey)
else:
stream = YoutubeStream(id=channel_id_or_name, token=apikey)
await self.check_online(ctx, stream)
@commands.command()
async def smashcast(self, ctx: commands.Context, channel_name: str):
"""Check if a smashcast channel is live."""
stream = HitboxStream(name=channel_name)
await self.check_online(ctx, stream)
@commands.command()
async def picarto(self, ctx: commands.Context, channel_name: str):
"""Check if a Picarto channel is live."""
stream = PicartoStream(name=channel_name)
await self.check_online(ctx, stream)
async def check_online(
self,
ctx: commands.Context,
stream: Union[PicartoStream, HitboxStream, YoutubeStream, TwitchStream],
):
try:
info = await stream.is_online()
except OfflineStream:
await ctx.send(_("That user is offline."))
except StreamNotFound:
await ctx.send(_("That channel doesn't seem to exist."))
except InvalidTwitchCredentials:
await ctx.send(
_("The Twitch token is either invalid or has not been set. See {command}.").format(
command=f"`{ctx.clean_prefix}streamset twitchtoken`"
)
)
except InvalidYoutubeCredentials:
await ctx.send(
_(
"The YouTube API key is either invalid or has not been set. See {command}."
).format(command=f"`{ctx.clean_prefix}streamset youtubekey`")
)
except APIError:
await ctx.send(
_("Something went wrong whilst trying to contact the stream service's API.")
)
else:
if isinstance(info, tuple):
embed, is_rerun = info
ignore_reruns = await self.config.guild(ctx.channel.guild).ignore_reruns()
if ignore_reruns and is_rerun:
await ctx.send(_("That user is offline."))
return
else:
embed = info
await ctx.send(embed=embed)
@commands.group()
@commands.guild_only()
@checks.mod_or_permissions(manage_channels=True)
async def streamalert(self, ctx: commands.Context):
"""Manage automated stream alerts."""
pass
@streamalert.group(name="twitch", invoke_without_command=True)
async def _twitch(self, ctx: commands.Context, channel_name: str = None):
"""Manage Twitch stream notifications."""
if channel_name is not None:
await ctx.invoke(self.twitch_alert_channel, channel_name)
else:
await ctx.send_help()
@_twitch.command(name="channel")
async def twitch_alert_channel(self, ctx: commands.Context, channel_name: str):
"""Toggle alerts in this channel for a Twitch stream."""
if re.fullmatch(r"<#\d+>", channel_name):
await ctx.send(
_("Please supply the name of a *Twitch* channel, not a Discord channel.")
)
return
await self.stream_alert(ctx, TwitchStream, channel_name.lower())
@streamalert.command(name="youtube")
async def youtube_alert(self, ctx: commands.Context, channel_name_or_id: str):
"""Toggle alerts in this channel for a YouTube stream."""
await self.stream_alert(ctx, YoutubeStream, channel_name_or_id)
@streamalert.command(name="smashcast")
async def smashcast_alert(self, ctx: commands.Context, channel_name: str):
"""Toggle alerts in this channel for a Smashcast stream."""
await self.stream_alert(ctx, HitboxStream, channel_name)
@streamalert.command(name="picarto")
async def picarto_alert(self, ctx: commands.Context, channel_name: str):
"""Toggle alerts in this channel for a Picarto stream."""
await self.stream_alert(ctx, PicartoStream, channel_name)
@streamalert.command(name="stop", usage="[disable_all=No]")
async def streamalert_stop(self, ctx: commands.Context, _all: bool = False):
"""Disable all stream alerts in this channel or server.
`[p]streamalert stop` will disable this channel's stream
alerts.
Do `[p]streamalert stop yes` to disable all stream alerts in
this server.
"""
streams = self.streams.copy()
local_channel_ids = [c.id for c in ctx.guild.channels]
to_remove = []
for stream in streams:
for channel_id in stream.channels:
if channel_id == ctx.channel.id:
stream.channels.remove(channel_id)
elif _all and ctx.channel.id in local_channel_ids:
if channel_id in stream.channels:
stream.channels.remove(channel_id)
if not stream.channels:
to_remove.append(stream)
for stream in to_remove:
streams.remove(stream)
self.streams = streams
await self.save_streams()
if _all:
msg = _("All the stream alerts in this server have been disabled.")
else:
msg = _("All the stream alerts in this channel have been disabled.")
await ctx.send(msg)
@streamalert.command(name="list")
async def streamalert_list(self, ctx: commands.Context):
"""List all active stream alerts in this server."""
streams_list = defaultdict(list)
guild_channels_ids = [c.id for c in ctx.guild.channels]
msg = _("Active alerts:\n\n")
for stream in self.streams:
for channel_id in stream.channels:
if channel_id in guild_channels_ids:
streams_list[channel_id].append(stream.name.lower())
if not streams_list:
await ctx.send(_("There are no active alerts in this server."))
return
for channel_id, streams in streams_list.items():
channel = ctx.guild.get_channel(channel_id)
msg += "** - #{}**\n{}\n".format(channel, ", ".join(streams))
for page in pagify(msg):
await ctx.send(page)
async def stream_alert(self, ctx: commands.Context, _class, channel_name):
stream = self.get_stream(_class, channel_name)
if not stream:
token = await self.bot.get_shared_api_tokens(_class.token_name)
is_yt = _class.__name__ == "YoutubeStream"
is_twitch = _class.__name__ == "TwitchStream"
if is_yt and not self.check_name_or_id(channel_name):
stream = _class(id=channel_name, token=token)
elif is_twitch:
await self.maybe_renew_twitch_bearer_token()
stream = _class(
name=channel_name,
token=token.get("client_id"),
bearer=self.ttv_bearer_cache.get("access_token", None),
)
else:
stream = _class(name=channel_name, token=token)
try:
exists = await self.check_exists(stream)
except InvalidTwitchCredentials:
await ctx.send(
_(
"The Twitch token is either invalid or has not been set. See {command}."
).format(command=f"`{ctx.clean_prefix}streamset twitchtoken`")
)
return
except InvalidYoutubeCredentials:
await ctx.send(
_(
"The YouTube API key is either invalid or has not been set. See "
"{command}."
).format(command=f"`{ctx.clean_prefix}streamset youtubekey`")
)
return
except APIError:
await ctx.send(
_("Something went wrong whilst trying to contact the stream service's API.")
)
return
else:
if not exists:
await ctx.send(_("That channel doesn't seem to exist."))
return
await self.add_or_remove(ctx, stream)
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
async def streamset(self, ctx: commands.Context):
"""Manage stream alert settings."""
pass
@streamset.command(name="timer")
@checks.is_owner()
async def _streamset_refresh_timer(self, ctx: commands.Context, refresh_time: int):
"""Set stream check refresh time."""
if refresh_time < 60:
return await ctx.send(_("You cannot set the refresh timer to less than 60 seconds"))
await self.config.refresh_timer.set(refresh_time)
await ctx.send(
_("Refresh timer set to {refresh_time} seconds".format(refresh_time=refresh_time))
)
@streamset.command()
@checks.is_owner()
async def twitchtoken(self, ctx: commands.Context):
"""Explain how to set the twitch token."""
message = _(
"To set the twitch API tokens, follow these steps:\n"
"1. Go to this page: https://dev.twitch.tv/dashboard/apps.\n"
"2. Click *Register Your Application*.\n"
"3. Enter a name, set the OAuth Redirect URI to `http://localhost`, and "
"select an Application Category of your choosing.\n"
"4. Click *Register*.\n"
"5. Copy your client ID and your client secret into:\n"
"{command}"
"\n\n"
"Note: These tokens are sensitive and should only be used in a private channel\n"
"or in DM with the bot.\n"
).format(
command="`{}set api twitch client_id {} client_secret {}`".format(
ctx.clean_prefix, _("<your_client_id_here>"), _("<your_client_secret_here>")
)
)
await ctx.maybe_send_embed(message)
@streamset.command()
@checks.is_owner()
async def youtubekey(self, ctx: commands.Context):
"""Explain how to set the YouTube token."""
message = _(
"To get one, do the following:\n"
"1. Create a project\n"
"(see https://support.google.com/googleapi/answer/6251787 for details)\n"
"2. Enable the YouTube Data API v3 \n"
"(see https://support.google.com/googleapi/answer/6158841 for instructions)\n"
"3. Set up your API key \n"
"(see https://support.google.com/googleapi/answer/6158862 for instructions)\n"
"4. Copy your API key and run the command "
"{command}\n\n"
"Note: These tokens are sensitive and should only be used in a private channel\n"
"or in DM with the bot.\n"
).format(
command="`{}set api youtube api_key {}`".format(
ctx.clean_prefix, _("<your_api_key_here>")
)
)
await ctx.maybe_send_embed(message)
@streamset.group()
@commands.guild_only()
async def message(self, ctx: commands.Context):
"""Manage custom message for stream alerts."""
pass
@message.command(name="mention")
@commands.guild_only()
async def with_mention(self, ctx: commands.Context, *, message: str = None):
"""Set stream alert message when mentions are enabled.
Use `{mention}` in the message to insert the selected mentions.
Use `{stream}` in the message to insert the channel or user name.
For example: `[p]streamset message mention "{mention}, {stream} is live!"`
"""
if message is not None:
guild = ctx.guild
await self.config.guild(guild).live_message_mention.set(message)
await ctx.send(_("Stream alert message set!"))
else:
await ctx.send_help()
@message.command(name="nomention")
@commands.guild_only()
async def without_mention(self, ctx: commands.Context, *, message: str = None):
"""Set stream alert message when mentions are disabled.
Use `{stream}` in the message to insert the channel or user name.
For example: `[p]streamset message nomention "{stream} is live!"`
"""
if message is not None:
guild = ctx.guild
await self.config.guild(guild).live_message_nomention.set(message)
await ctx.send(_("Stream alert message set!"))
else:
await ctx.send_help()
@message.command(name="clear")
@commands.guild_only()
async def clear_message(self, ctx: commands.Context):
"""Reset the stream alert messages in this server."""
guild = ctx.guild
await self.config.guild(guild).live_message_mention.set(False)
await self.config.guild(guild).live_message_nomention.set(False)
await ctx.send(_("Stream alerts in this server will now use the default alert message."))
@streamset.group()
@commands.guild_only()
async def mention(self, ctx: commands.Context):
"""Manage mention settings for stream alerts."""
pass
@mention.command(aliases=["everyone"])
@commands.guild_only()
async def all(self, ctx: commands.Context):
"""Toggle the `@\u200beveryone` mention."""
guild = ctx.guild
current_setting = await self.config.guild(guild).mention_everyone()
if current_setting:
await self.config.guild(guild).mention_everyone.set(False)
await ctx.send(_("`@\u200beveryone` will no longer be mentioned for stream alerts."))
else:
await self.config.guild(guild).mention_everyone.set(True)
await ctx.send(_("When a stream is live, `@\u200beveryone` will be mentioned."))
@mention.command(aliases=["here"])
@commands.guild_only()
async def online(self, ctx: commands.Context):
"""Toggle the `@\u200bhere` mention."""
guild = ctx.guild
current_setting = await self.config.guild(guild).mention_here()
if current_setting:
await self.config.guild(guild).mention_here.set(False)
await ctx.send(_("`@\u200bhere` will no longer be mentioned for stream alerts."))
else:
await self.config.guild(guild).mention_here.set(True)
await ctx.send(_("When a stream is live, `@\u200bhere` will be mentioned."))
@mention.command()
@commands.guild_only()
async def role(self, ctx: commands.Context, *, role: discord.Role):
"""Toggle a role mention."""
current_setting = await self.config.role(role).mention()
if current_setting:
await self.config.role(role).mention.set(False)
await ctx.send(
_("`@\u200b{role.name}` will no longer be mentioned for stream alerts.").format(
role=role
)
)
else:
await self.config.role(role).mention.set(True)
msg = _(
"When a stream or community is live, `@\u200b{role.name}` will be mentioned."
).format(role=role)
if not role.mentionable:
msg += " " + _(
"Since the role is not mentionable, it will be momentarily made mentionable "
"when announcing a streamalert. Please make sure I have the correct "
"permissions to manage this role, or else members of this role won't receive "
"a notification."
)
await ctx.send(msg)
@streamset.command()
@commands.guild_only()
async def autodelete(self, ctx: commands.Context, on_off: bool):
"""Toggle alert deletion for when streams go offline."""
await self.config.guild(ctx.guild).autodelete.set(on_off)
if on_off:
await ctx.send(_("The notifications will be deleted once streams go offline."))
else:
await ctx.send(_("Notifications will no longer be deleted."))
@streamset.command(name="ignorereruns")
@commands.guild_only()
async def ignore_reruns(self, ctx: commands.Context):
"""Toggle excluding rerun streams from alerts."""
guild = ctx.guild
current_setting = await self.config.guild(guild).ignore_reruns()
if current_setting:
await self.config.guild(guild).ignore_reruns.set(False)
await ctx.send(_("Streams of type 'rerun' will be included in alerts."))
else:
await self.config.guild(guild).ignore_reruns.set(True)
await ctx.send(_("Streams of type 'rerun' will no longer send an alert."))
async def add_or_remove(self, ctx: commands.Context, stream):
if ctx.channel.id not in stream.channels:
stream.channels.append(ctx.channel.id)
if stream not in self.streams:
self.streams.append(stream)
await ctx.send(
_(
"I'll now send a notification in this channel when {stream.name} is live."
).format(stream=stream)
)
else:
stream.channels.remove(ctx.channel.id)
if not stream.channels:
self.streams.remove(stream)
await ctx.send(
_(
"I won't send notifications about {stream.name} in this channel anymore."
).format(stream=stream)
)
await self.save_streams()
def get_stream(self, _class, name):
for stream in self.streams:
# if isinstance(stream, _class) and stream.name == name:
# return stream
# Reloading this cog causes an issue with this check ^
# isinstance will always return False
# As a workaround, we'll compare the class' name instead.
# Good enough.
if _class.__name__ == "YoutubeStream" and stream.type == _class.__name__:
# Because name could be a username or a channel id
if self.check_name_or_id(name) and stream.name.lower() == name.lower():
return stream
elif not self.check_name_or_id(name) and stream.id == name:
return stream
elif stream.type == _class.__name__ and stream.name.lower() == name.lower():
return stream
@staticmethod
async def check_exists(stream):
try:
await stream.is_online()
except OfflineStream:
pass
except StreamNotFound:
return False
except StreamsError:
raise
return True
async def _stream_alerts(self):
await self.bot.wait_until_ready()
while True:
try:
await self.check_streams()
except asyncio.CancelledError:
pass
await asyncio.sleep(await self.config.refresh_timer())
async def check_streams(self):
for stream in self.streams:
with contextlib.suppress(Exception):
try:
if stream.__class__.__name__ == "TwitchStream":
await self.maybe_renew_twitch_bearer_token()
embed, is_rerun = await stream.is_online()
else:
embed = await stream.is_online()
is_rerun = False
except OfflineStream:
if not stream._messages_cache:
continue
for message in stream._messages_cache:
with contextlib.suppress(Exception):
if await self.bot.cog_disabled_in_guild(self, message.guild):
continue
autodelete = await self.config.guild(message.guild).autodelete()
if autodelete:
await message.delete()
stream._messages_cache.clear()
await self.save_streams()
else:
if stream._messages_cache:
continue
for channel_id in stream.channels:
channel = self.bot.get_channel(channel_id)
if not channel:
continue
if await self.bot.cog_disabled_in_guild(self, channel.guild):
continue
ignore_reruns = await self.config.guild(channel.guild).ignore_reruns()
if ignore_reruns and is_rerun:
continue
mention_str, edited_roles = await self._get_mention_str(channel.guild)
if mention_str:
alert_msg = await self.config.guild(
channel.guild
).live_message_mention()
if alert_msg:
content = alert_msg  # Filled in via str.replace (not str.format) to avoid format-string injection
content = content.replace("{stream.name}", str(stream.name))  # Backwards compatibility
content = content.replace("{stream}", str(stream.name))
content = content.replace("{mention}", mention_str)
else:
content = _("{mention}, {stream} is live!").format(
mention=mention_str,
stream=escape(
str(stream.name), mass_mentions=True, formatting=True
),
)
else:
alert_msg = await self.config.guild(
channel.guild
).live_message_nomention()
if alert_msg:
content = alert_msg  # Filled in via str.replace (not str.format) to avoid format-string injection
content = content.replace("{stream.name}", str(stream.name))  # Backwards compatibility
content = content.replace("{stream}", str(stream.name))
else:
content = _("{stream} is live!").format(
stream=escape(
str(stream.name), mass_mentions=True, formatting=True
)
)
m = await channel.send(content, embed=embed)
stream._messages_cache.append(m)
if edited_roles:
for role in edited_roles:
await role.edit(mentionable=False)
await self.save_streams()
async def _get_mention_str(self, guild: discord.Guild) -> Tuple[str, List[discord.Role]]:
"""Returns a 2-tuple with the string containing the mentions, and a list of
all roles which need to have their `mentionable` property set back to False.
"""
settings = self.config.guild(guild)
mentions = []
edited_roles = []
if await settings.mention_everyone():
mentions.append("@everyone")
if await settings.mention_here():
mentions.append("@here")
can_manage_roles = guild.me.guild_permissions.manage_roles
for role in guild.roles:
if await self.config.role(role).mention():
if can_manage_roles and not role.mentionable:
try:
await role.edit(mentionable=True)
except discord.Forbidden:
# Might still be unable to edit role based on hierarchy
pass
else:
edited_roles.append(role)
mentions.append(role.mention)
return " ".join(mentions), edited_roles
async def filter_streams(self, streams: list, channel: discord.TextChannel) -> list:
filtered = []
for stream in streams:
tw_id = str(stream["channel"]["_id"])
for alert in self.streams:
if isinstance(alert, TwitchStream) and alert.id == tw_id:
if channel.id in alert.channels:
break
else:
filtered.append(stream)
return filtered
async def load_streams(self):
streams = []
for raw_stream in await self.config.streams():
_class = getattr(_streamtypes, raw_stream["type"], None)
if not _class:
continue
raw_msg_cache = raw_stream["messages"]
raw_stream["_messages_cache"] = []
for raw_msg in raw_msg_cache:
chn = self.bot.get_channel(raw_msg["channel"])
if chn is not None:
try:
msg = await chn.fetch_message(raw_msg["message"])
except discord.HTTPException:
pass
else:
raw_stream["_messages_cache"].append(msg)
token = await self.bot.get_shared_api_tokens(_class.token_name)
if token:
if _class.__name__ == "TwitchStream":
raw_stream["token"] = token.get("client_id")
raw_stream["bearer"] = self.ttv_bearer_cache.get("access_token", None)
else:
raw_stream["token"] = token
streams.append(_class(**raw_stream))
return streams
async def save_streams(self):
raw_streams = []
for stream in self.streams:
raw_streams.append(stream.export())
await self.config.streams.set(raw_streams)
def cog_unload(self):
if self.task:
self.task.cancel()
__del__ = cog_unload
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_4113_0 |
crossvul-python_data_bad_663_0 | # -*- coding: utf-8 -*-
"""
eve.io.mongo.parser
~~~~~~~~~~~~~~~~~~~
This module implements a Python-to-Mongo syntax parser. Allows the MongoDB
data-layer to seamlessly respond to a Python-like query.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import ast
import sys
from datetime import datetime # noqa
from bson import ObjectId # noqa
def parse(expression):
""" Given a python-like conditional statement, returns the equivalent
mongo-like query expression. Conditional and boolean operators (==, <=, >=,
!=, >, <) along with a couple function calls (ObjectId(), datetime()) are
supported.
"""
v = MongoVisitor()
try:
v.visit(ast.parse(expression))
except SyntaxError as e:
e = ParseError(e)
e.__traceback__ = sys.exc_info()[2]
raise e
return v.mongo_query
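# A minimal usage sketch (illustrative inputs; the output shapes follow the
# visitor below):
#
#     >>> parse('x == 5 and y > 3')
#     {'$and': [{'x': 5}, {'y': {'$gt': 3}}]}
#     >>> parse('_id == ObjectId("4f46445fc88e201858000000")')
#     {'_id': ObjectId('4f46445fc88e201858000000')}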
class ParseError(ValueError):
pass
class MongoVisitor(ast.NodeVisitor):
""" Implements the python-to-mongo parser. Only Python conditional
statements are supported; they may be arbitrarily nested and combined with
the most common compare and boolean operators (And and Or).
Supported compare operators: ==, >, <, !=, >=, <=
Supported boolean operators: And, Or
"""
op_mapper = {
ast.Eq: '',
ast.Gt: '$gt',
ast.GtE: '$gte',
ast.Lt: '$lt',
ast.LtE: '$lte',
ast.NotEq: '$ne',
ast.Or: '$or',
ast.And: '$and'
}
def visit_Module(self, node):
""" Module handler, our entry point.
"""
self.mongo_query = {}
self.ops = []
self.current_value = None
# perform the magic.
self.generic_visit(node)
# if we didn't obtain a query, it is likely that an unsupported
# python expression has been passed.
if self.mongo_query == {}:
raise ParseError("Only conditional statements with boolean "
"(and, or) and comparison operators are "
"supported.")
def visit_Expr(self, node):
""" Make sure that we are parsing compare or boolean operators
"""
if not (isinstance(node.value, ast.Compare) or
isinstance(node.value, ast.BoolOp)):
raise ParseError("Will only parse conditional statements")
self.generic_visit(node)
def visit_Compare(self, node):
""" Compare operator handler.
"""
self.visit(node.left)
left = self.current_value
operator = self.op_mapper[node.ops[0].__class__] if node.ops else None
if node.comparators:
comparator = node.comparators[0]
self.visit(comparator)
if operator != '':
value = {operator: self.current_value}
else:
value = self.current_value
if self.ops:
self.ops[-1].append({left: value})
else:
self.mongo_query[left] = value
def visit_BoolOp(self, node):
""" Boolean operator handler.
"""
op = self.op_mapper[node.op.__class__]
self.ops.append([])
for value in node.values:
self.visit(value)
c = self.ops.pop()
if self.ops:
self.ops[-1].append({op: c})
else:
self.mongo_query[op] = c
def visit_Call(self, node):
""" A couple function calls are supported: bson's ObjectId() and
datetime().
"""
if isinstance(node.func, ast.Name):
expr = None
if node.func.id == 'ObjectId':
expr = "('" + node.args[0].s + "')"
elif node.func.id == 'datetime':
values = []
for arg in node.args:
values.append(str(arg.n))
expr = "(" + ", ".join(values) + ")"
if expr:
self.current_value = eval(node.func.id + expr)
def visit_Attribute(self, node):
""" Attribute handler ('Contact.Id').
"""
self.visit(node.value)
self.current_value += "." + node.attr
def visit_Name(self, node):
""" Names handler.
"""
self.current_value = node.id
def visit_Num(self, node):
""" Numbers handler.
"""
self.current_value = node.n
def visit_Str(self, node):
""" Strings handler.
"""
self.current_value = node.s
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_663_0 |
crossvul-python_data_good_3754_4 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common.memcached import MemcacheRing
class MemcacheMiddleware(object):
"""
Caching middleware that manages caching in swift.
"""
def __init__(self, app, conf):
self.app = app
self.memcache_servers = conf.get('memcache_servers')
serialization_format = conf.get('memcache_serialization_support')
if not self.memcache_servers or serialization_format is None:
path = os.path.join(conf.get('swift_dir', '/etc/swift'),
'memcache.conf')
memcache_conf = ConfigParser()
if memcache_conf.read(path):
if not self.memcache_servers:
try:
self.memcache_servers = \
memcache_conf.get('memcache', 'memcache_servers')
except (NoSectionError, NoOptionError):
pass
if serialization_format is None:
try:
serialization_format = \
memcache_conf.get('memcache',
'memcache_serialization_support')
except (NoSectionError, NoOptionError):
pass
if not self.memcache_servers:
self.memcache_servers = '127.0.0.1:11211'
if serialization_format is None:
serialization_format = 2
self.memcache = MemcacheRing(
[s.strip() for s in self.memcache_servers.split(',') if s.strip()],
allow_pickle=(serialization_format == 0),
allow_unpickle=(serialization_format <= 1))
def __call__(self, env, start_response):
env['swift.cache'] = self.memcache
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def cache_filter(app):
return MemcacheMiddleware(app, conf)
return cache_filter
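# A hypothetical paste.ini excerpt wiring this filter into a proxy pipeline
# (the egg entry-point name is assumed; adjust to the local deployment):
#
#     [filter:cache]
#     use = egg:swift#memcache
#     memcache_servers = 127.0.0.1:11211
#     memcache_serialization_support = 2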
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_3754_4 |
crossvul-python_data_bad_3754_3 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lucid comes with memcached: v1.4.2. Protocol documentation for that
version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""
import cPickle as pickle
import logging
import socket
import time
from bisect import bisect
from hashlib import md5
DEFAULT_MEMCACHED_PORT = 11211
CONN_TIMEOUT = 0.3
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
NODE_WEIGHT = 50
PICKLE_PROTOCOL = 2
TRY_COUNT = 3
# if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = 60
ERROR_LIMIT_DURATION = 60
def md5hash(key):
return md5(key).hexdigest()
class MemcacheConnectionError(Exception):
pass
class MemcacheRing(object):
"""
Simple, consistent-hashed memcache client.
"""
def __init__(self, servers, connect_timeout=CONN_TIMEOUT,
io_timeout=IO_TIMEOUT, tries=TRY_COUNT):
self._ring = {}
self._errors = dict(((serv, []) for serv in servers))
self._error_limited = dict(((serv, 0) for serv in servers))
for server in sorted(servers):
for i in xrange(NODE_WEIGHT):
self._ring[md5hash('%s-%s' % (server, i))] = server
self._tries = tries if tries <= len(servers) else len(servers)
self._sorted = sorted(self._ring.keys())
self._client_cache = dict(((server, []) for server in servers))
self._connect_timeout = connect_timeout
self._io_timeout = io_timeout
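# A small sketch of the resulting ring (hypothetical servers):
#
#     ring = MemcacheRing(['10.0.0.1:11211', '10.0.0.2:11211'])
#
# Each server is placed on the ring NODE_WEIGHT (50) times, once per
# md5hash('<server>-<i>'); a key is then served starting from the entry just
# past bisect(ring._sorted, md5hash(key)), so keys spread evenly and only a
# fraction of them move when a server joins or leaves.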
def _exception_occurred(self, server, e, action='talking'):
if isinstance(e, socket.timeout):
logging.error(_("Timeout %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
else:
logging.exception(_("Error %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
now = time.time()
self._errors[server].append(time.time())
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._errors[server] = [err for err in self._errors[server]
if err > now - ERROR_LIMIT_TIME]
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._error_limited[server] = now + ERROR_LIMIT_DURATION
logging.error(_('Error limiting server %s'), server)
def _get_conns(self, key):
"""
Retrieves a server conn from the pool, or connects a new one.
Chooses the server based on a consistent hash of "key".
"""
pos = bisect(self._sorted, key)
served = []
while len(served) < self._tries:
pos = (pos + 1) % len(self._sorted)
server = self._ring[self._sorted[pos]]
if server in served:
continue
served.append(server)
if self._error_limited[server] > time.time():
continue
try:
fp, sock = self._client_cache[server].pop()
yield server, fp, sock
except IndexError:
try:
if ':' in server:
host, port = server.split(':')
else:
host = server
port = DEFAULT_MEMCACHED_PORT
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(self._connect_timeout)
sock.connect((host, int(port)))
sock.settimeout(self._io_timeout)
yield server, sock.makefile(), sock
except Exception, e:
self._exception_occurred(server, e, 'connecting')
def _return_conn(self, server, fp, sock):
""" Returns a server connection to the pool """
self._client_cache[server].append((fp, sock))
def set(self, key, value, serialize=True, timeout=0):
"""
Set a key/value pair in memcache
:param key: key
:param value: value
:param serialize: if True, value is pickled before sending to memcache
:param timeout: ttl in memcache
"""
key = md5hash(key)
if timeout > 0:
timeout += time.time()
flags = 0
if serialize:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('set %s %d %d %s noreply\r\n%s\r\n' % \
(key, flags, timeout, len(value), value))
self._return_conn(server, fp, sock)
return
except Exception, e:
self._exception_occurred(server, e)
def get(self, key):
"""
Gets the object specified by key. It will also unpickle the object
before returning if it is pickled in memcache.
:param key: key
:returns: value of the key in memcache
"""
key = md5hash(key)
value = None
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('get %s\r\n' % key)
line = fp.readline().strip().split()
while line[0].upper() != 'END':
if line[0].upper() == 'VALUE' and line[1] == key:
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
value = pickle.loads(value)
fp.readline()
line = fp.readline().strip().split()
self._return_conn(server, fp, sock)
return value
except Exception, e:
self._exception_occurred(server, e)
def incr(self, key, delta=1, timeout=0):
"""
Increments a key which has a numeric value by delta.
If the key can't be found, it's added as delta or 0 if delta < 0.
If passed a negative number, will use memcached's decr. Returns
the int stored in memcached
Note: The data memcached stores as the result of incr/decr is
an unsigned int. decr's that result in a number below 0 are
stored as 0.
:param key: key
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param timeout: ttl in memcache
:raises MemcacheConnectionError:
"""
key = md5hash(key)
command = 'incr'
if delta < 0:
command = 'decr'
delta = str(abs(int(delta)))
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('%s %s %s\r\n' % (command, key, delta))
line = fp.readline().strip().split()
if line[0].upper() == 'NOT_FOUND':
add_val = delta
if command == 'decr':
add_val = '0'
sock.sendall('add %s %d %d %s\r\n%s\r\n' % \
(key, 0, timeout, len(add_val), add_val))
line = fp.readline().strip().split()
if line[0].upper() == 'NOT_STORED':
sock.sendall('%s %s %s\r\n' % (command, key, delta))
line = fp.readline().strip().split()
ret = int(line[0].strip())
else:
ret = int(add_val)
else:
ret = int(line[0].strip())
self._return_conn(server, fp, sock)
return ret
except Exception, e:
self._exception_occurred(server, e)
raise MemcacheConnectionError("No Memcached connections succeeded.")
def decr(self, key, delta=1, timeout=0):
"""
Decrements a key which has a numeric value by delta. Calls incr with
-delta.
:param key: key
:param delta: amount to subtract to the value of key (or set the
value to 0 if the key is not found) will be cast to
an int
:param timeout: ttl in memcache
:raises MemcacheConnectionError:
"""
self.incr(key, delta=-delta, timeout=timeout)
def delete(self, key):
"""
Deletes a key/value pair from memcache.
:param key: key to be deleted
"""
key = md5hash(key)
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('delete %s noreply\r\n' % key)
self._return_conn(server, fp, sock)
return
except Exception, e:
self._exception_occurred(server, e)
def set_multi(self, mapping, server_key, serialize=True, timeout=0):
"""
Sets multiple key/value pairs in memcache.
:param mapping: dictionary of keys and values to be set in memcache
:param server_key: key to use in determining which server in the ring
is used
:param serialize: if True, value is pickled before sending to memcache
:param timeout: ttl for memcache
"""
server_key = md5hash(server_key)
if timeout > 0:
timeout += time.time()
msg = ''
for key, value in mapping.iteritems():
key = md5hash(key)
flags = 0
if serialize:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
msg += ('set %s %d %d %s noreply\r\n%s\r\n' %
(key, flags, timeout, len(value), value))
for (server, fp, sock) in self._get_conns(server_key):
try:
sock.sendall(msg)
self._return_conn(server, fp, sock)
return
except Exception, e:
self._exception_occurred(server, e)
def get_multi(self, keys, server_key):
"""
Gets multiple values from memcache for the given keys.
:param keys: keys for values to be retrieved from memcache
:param server_key: key to use in determining which server in the ring
is used
:returns: list of values
"""
server_key = md5hash(server_key)
keys = [md5hash(key) for key in keys]
for (server, fp, sock) in self._get_conns(server_key):
try:
sock.sendall('get %s\r\n' % ' '.join(keys))
line = fp.readline().strip().split()
responses = {}
while line[0].upper() != 'END':
if line[0].upper() == 'VALUE':
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
value = pickle.loads(value)
responses[line[1]] = value
fp.readline()
line = fp.readline().strip().split()
values = []
for key in keys:
if key in responses:
values.append(responses[key])
else:
values.append(None)
self._return_conn(server, fp, sock)
return values
except Exception, e:
self._exception_occurred(server, e)
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_3754_3 |
crossvul-python_data_bad_3064_4 | import yaml
try:
from ansible.utils.vault import VaultLib
except ImportError:
# Ansible 2.0 has changed the vault location
from ansible.parsing.vault import VaultLib
class Vault(object):
'''R/W an ansible-vault yaml file'''
def __init__(self, password):
self.password = password
self.vault = VaultLib(password)
def load(self, stream):
'''read vault stream and return a python object'''
return yaml.load(self.vault.decrypt(stream))
def dump(self, data, stream=None):
'''encrypt data and print stdout or write to stream'''
yaml_text = yaml.dump(
data,
default_flow_style=False,
allow_unicode=True)
encrypted = self.vault.encrypt(yaml_text)
if stream:
stream.write(encrypted)
else:
return encrypted
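# A minimal usage sketch (hypothetical password and file name):
#
#     vault = Vault('s3cret')
#     with open('group_vars/all.yml') as f:
#         data = vault.load(f.read())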
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_3064_4 |
crossvul-python_data_good_3754_3 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lucid comes with memcached: v1.4.2. Protocol documentation for that
version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""
import cPickle as pickle
import logging
import socket
import time
from bisect import bisect
from hashlib import md5
try:
import simplejson as json
except ImportError:
import json
DEFAULT_MEMCACHED_PORT = 11211
CONN_TIMEOUT = 0.3
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
JSON_FLAG = 2
NODE_WEIGHT = 50
PICKLE_PROTOCOL = 2
TRY_COUNT = 3
# if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = 60
ERROR_LIMIT_DURATION = 60
def md5hash(key):
return md5(key).hexdigest()
class MemcacheConnectionError(Exception):
pass
class MemcacheRing(object):
"""
Simple, consistent-hashed memcache client.
"""
def __init__(self, servers, connect_timeout=CONN_TIMEOUT,
io_timeout=IO_TIMEOUT, tries=TRY_COUNT,
allow_pickle=False, allow_unpickle=False):
self._ring = {}
self._errors = dict(((serv, []) for serv in servers))
self._error_limited = dict(((serv, 0) for serv in servers))
for server in sorted(servers):
for i in xrange(NODE_WEIGHT):
self._ring[md5hash('%s-%s' % (server, i))] = server
self._tries = tries if tries <= len(servers) else len(servers)
self._sorted = sorted(self._ring.keys())
self._client_cache = dict(((server, []) for server in servers))
self._connect_timeout = connect_timeout
self._io_timeout = io_timeout
self._allow_pickle = allow_pickle
self._allow_unpickle = allow_unpickle or allow_pickle
def _exception_occurred(self, server, e, action='talking'):
if isinstance(e, socket.timeout):
logging.error(_("Timeout %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
else:
logging.exception(_("Error %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
now = time.time()
self._errors[server].append(time.time())
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._errors[server] = [err for err in self._errors[server]
if err > now - ERROR_LIMIT_TIME]
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._error_limited[server] = now + ERROR_LIMIT_DURATION
logging.error(_('Error limiting server %s'), server)
def _get_conns(self, key):
"""
Retrieves a server conn from the pool, or connects a new one.
Chooses the server based on a consistent hash of "key".
"""
pos = bisect(self._sorted, key)
served = []
while len(served) < self._tries:
pos = (pos + 1) % len(self._sorted)
server = self._ring[self._sorted[pos]]
if server in served:
continue
served.append(server)
if self._error_limited[server] > time.time():
continue
try:
fp, sock = self._client_cache[server].pop()
yield server, fp, sock
except IndexError:
try:
if ':' in server:
host, port = server.split(':')
else:
host = server
port = DEFAULT_MEMCACHED_PORT
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(self._connect_timeout)
sock.connect((host, int(port)))
sock.settimeout(self._io_timeout)
yield server, sock.makefile(), sock
except Exception, e:
self._exception_occurred(server, e, 'connecting')
def _return_conn(self, server, fp, sock):
""" Returns a server connection to the pool """
self._client_cache[server].append((fp, sock))
def set(self, key, value, serialize=True, timeout=0):
"""
Set a key/value pair in memcache
:param key: key
:param value: value
:param serialize: if True, value is serialized with JSON before sending
to memcache (the default, chosen to avoid cache poisoning), or with
pickle if this ring was configured to allow it
:param timeout: ttl in memcache
"""
key = md5hash(key)
if timeout > 0:
timeout += time.time()
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
value = json.dumps(value)
flags |= JSON_FLAG
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('set %s %d %d %s noreply\r\n%s\r\n' % \
(key, flags, timeout, len(value), value))
self._return_conn(server, fp, sock)
return
except Exception, e:
self._exception_occurred(server, e)
def get(self, key):
"""
Gets the object specified by key. It will also unserialize the object
before returning if it is serialized in memcache with JSON, or if it
is pickled and unpickling is allowed.
:param key: key
:returns: value of the key in memcache
"""
key = md5hash(key)
value = None
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('get %s\r\n' % key)
line = fp.readline().strip().split()
while line[0].upper() != 'END':
if line[0].upper() == 'VALUE' and line[1] == key:
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
if self._allow_unpickle:
value = pickle.loads(value)
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
fp.readline()
line = fp.readline().strip().split()
self._return_conn(server, fp, sock)
return value
except Exception, e:
self._exception_occurred(server, e)
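# Round-trip sketch (assumes a reachable memcached; values are illustrative):
#
#     ring = MemcacheRing(['127.0.0.1:11211'])
#     ring.set('stats', {'count': 3}, timeout=60)  # stored with JSON_FLAG
#     ring.get('stats')                            # -> {u'count': 3}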
def incr(self, key, delta=1, timeout=0):
"""
Increments a key which has a numeric value by delta.
If the key can't be found, it's added as delta or 0 if delta < 0.
If passed a negative number, will use memcached's decr. Returns
the int stored in memcached
Note: The data memcached stores as the result of incr/decr is
an unsigned int. decr's that result in a number below 0 are
stored as 0.
:param key: key
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param timeout: ttl in memcache
:raises MemcacheConnectionError:
"""
key = md5hash(key)
command = 'incr'
if delta < 0:
command = 'decr'
delta = str(abs(int(delta)))
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('%s %s %s\r\n' % (command, key, delta))
line = fp.readline().strip().split()
if line[0].upper() == 'NOT_FOUND':
add_val = delta
if command == 'decr':
add_val = '0'
sock.sendall('add %s %d %d %s\r\n%s\r\n' % \
(key, 0, timeout, len(add_val), add_val))
line = fp.readline().strip().split()
if line[0].upper() == 'NOT_STORED':
sock.sendall('%s %s %s\r\n' % (command, key, delta))
line = fp.readline().strip().split()
ret = int(line[0].strip())
else:
ret = int(add_val)
else:
ret = int(line[0].strip())
self._return_conn(server, fp, sock)
return ret
except Exception, e:
self._exception_occurred(server, e)
raise MemcacheConnectionError("No Memcached connections succeeded.")
def decr(self, key, delta=1, timeout=0):
"""
Decrements a key which has a numeric value by delta. Calls incr with
-delta.
:param key: key
:param delta: amount to subtract to the value of key (or set the
value to 0 if the key is not found) will be cast to
an int
:param timeout: ttl in memcache
:raises MemcacheConnectionError:
"""
self.incr(key, delta=-delta, timeout=timeout)
def delete(self, key):
"""
Deletes a key/value pair from memcache.
:param key: key to be deleted
"""
key = md5hash(key)
for (server, fp, sock) in self._get_conns(key):
try:
sock.sendall('delete %s noreply\r\n' % key)
self._return_conn(server, fp, sock)
return
except Exception, e:
self._exception_occurred(server, e)
def set_multi(self, mapping, server_key, serialize=True, timeout=0):
"""
Sets multiple key/value pairs in memcache.
:param mapping: dictionary of keys and values to be set in memcache
:param server_key: key to use in determining which server in the ring
is used
:param serialize: if True, value is serialized with JSON before sending
to memcache (the default, chosen to avoid cache poisoning), or with
pickle if this ring was configured to allow it
:param timeout: ttl for memcache
"""
server_key = md5hash(server_key)
if timeout > 0:
timeout += time.time()
msg = ''
for key, value in mapping.iteritems():
key = md5hash(key)
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
value = json.dumps(value)
flags |= JSON_FLAG
msg += ('set %s %d %d %s noreply\r\n%s\r\n' %
(key, flags, timeout, len(value), value))
for (server, fp, sock) in self._get_conns(server_key):
try:
sock.sendall(msg)
self._return_conn(server, fp, sock)
return
except Exception, e:
self._exception_occurred(server, e)
def get_multi(self, keys, server_key):
"""
Gets multiple values from memcache for the given keys.
:param keys: keys for values to be retrieved from memcache
:param server_key: key to use in determining which server in the ring
is used
:returns: list of values
"""
server_key = md5hash(server_key)
keys = [md5hash(key) for key in keys]
for (server, fp, sock) in self._get_conns(server_key):
try:
sock.sendall('get %s\r\n' % ' '.join(keys))
line = fp.readline().strip().split()
responses = {}
while line[0].upper() != 'END':
if line[0].upper() == 'VALUE':
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
if self._allow_unpickle:
value = pickle.loads(value)
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
responses[line[1]] = value
fp.readline()
line = fp.readline().strip().split()
values = []
for key in keys:
if key in responses:
values.append(responses[key])
else:
values.append(None)
self._return_conn(server, fp, sock)
return values
except Exception, e:
self._exception_occurred(server, e)
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_3754_3 |
crossvul-python_data_good_4111_1 | """ A FastAPI app used to create an OpenAPI document for end-to-end testing """
import json
from datetime import date, datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Union
from fastapi import APIRouter, Body, FastAPI, File, Header, Query, UploadFile
from pydantic import BaseModel
app = FastAPI(title="My Test API", description="An API for testing openapi-python-client",)
@app.get("/ping", response_model=bool)
async def ping():
""" A quick check to see if the system is running """
return True
test_router = APIRouter()
class AnEnum(Enum):
""" For testing Enums in all the ways they can be used """
FIRST_VALUE = "FIRST_VALUE"
SECOND_VALUE = "SECOND_VALUE"
class DifferentEnum(Enum):
FIRST_VALUE = "DIFFERENT"
SECOND_VALUE = "OTHER"
class OtherModel(BaseModel):
""" A different model for calling from TestModel """
a_value: str
class AModel(BaseModel):
""" A Model for testing all the ways custom objects can be used """
an_enum_value: AnEnum
nested_list_of_enums: List[List[DifferentEnum]] = []
some_dict: Dict[str, str]
aCamelDateTime: Union[datetime, date]
a_date: date
@test_router.get("/", response_model=List[AModel], operation_id="getUserList")
def get_list(
an_enum_value: List[AnEnum] = Query(...), some_date: Union[date, datetime] = Query(...),
):
""" Get a list of things """
return
@test_router.post("/upload")
async def upload_file(some_file: UploadFile = File(...), keep_alive: bool = Header(None)):
""" Upload a file """
data = await some_file.read()
return (some_file.filename, some_file.content_type, data)
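# Exercising the endpoint by hand, assuming the default uvicorn port and an
# illustrative file name:
#
#     curl -F "some_file=@report.pdf" http://localhost:8000/tests/upload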
@test_router.post("/json_body")
def json_body(body: AModel):
""" Try sending a JSON body """
return
@test_router.post("/test_defaults")
def test_defaults(
string_prop: str = Query(default="the default string"),
datetime_prop: datetime = Query(default=datetime(1010, 10, 10)),
date_prop: date = Query(default=date(1010, 10, 10)),
float_prop: float = Query(default=3.14),
int_prop: int = Query(default=7),
boolean_prop: bool = Query(default=False),
list_prop: List[AnEnum] = Query(default=[AnEnum.FIRST_VALUE, AnEnum.SECOND_VALUE]),
union_prop: Union[float, str] = Query(default="not a float"),
enum_prop: AnEnum = Query(default=AnEnum.FIRST_VALUE),
dict_prop: Dict[str, str] = Body(default={"key": "val"}),
):
return
app.include_router(test_router, prefix="/tests", tags=["tests"])
def generate_openapi_json():
path = Path(__file__).parent / "openapi.json"
path.write_text(json.dumps(app.openapi(), indent=4))
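# Running this module directly regenerates openapi.json beside this file, the
# document the end-to-end tests consume.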
if __name__ == "__main__":
generate_openapi_json()
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_4111_1 |
crossvul-python_data_bad_3754_4 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common.memcached import MemcacheRing
class MemcacheMiddleware(object):
"""
Caching middleware that manages caching in swift.
"""
def __init__(self, app, conf):
self.app = app
self.memcache_servers = conf.get('memcache_servers')
if not self.memcache_servers:
path = os.path.join(conf.get('swift_dir', '/etc/swift'),
'memcache.conf')
memcache_conf = ConfigParser()
if memcache_conf.read(path):
try:
self.memcache_servers = \
memcache_conf.get('memcache', 'memcache_servers')
except (NoSectionError, NoOptionError):
pass
if not self.memcache_servers:
self.memcache_servers = '127.0.0.1:11211'
self.memcache = MemcacheRing(
[s.strip() for s in self.memcache_servers.split(',') if s.strip()])
def __call__(self, env, start_response):
env['swift.cache'] = self.memcache
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def cache_filter(app):
return MemcacheMiddleware(app, conf)
return cache_filter
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_3754_4 |
crossvul-python_data_bad_4111_1 | """ A FastAPI app used to create an OpenAPI document for end-to-end testing """
import json
from datetime import date, datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Union
from fastapi import APIRouter, FastAPI, File, Header, Query, UploadFile
from pydantic import BaseModel
app = FastAPI(title="My Test API", description="An API for testing openapi-python-client",)
@app.get("/ping", response_model=bool)
async def ping():
""" A quick check to see if the system is running """
return True
test_router = APIRouter()
class AnEnum(Enum):
""" For testing Enums in all the ways they can be used """
FIRST_VALUE = "FIRST_VALUE"
SECOND_VALUE = "SECOND_VALUE"
class DifferentEnum(Enum):
FIRST_VALUE = "DIFFERENT"
SECOND_VALUE = "OTHER"
class OtherModel(BaseModel):
""" A different model for calling from TestModel """
a_value: str
class AModel(BaseModel):
""" A Model for testing all the ways custom objects can be used """
an_enum_value: AnEnum
nested_list_of_enums: List[List[DifferentEnum]] = []
some_dict: Dict[str, str] = {}
aCamelDateTime: Union[datetime, date]
a_date: date
@test_router.get("/", response_model=List[AModel], operation_id="getUserList")
def get_list(an_enum_value: List[AnEnum] = Query(...), some_date: Union[date, datetime] = Query(...)):
""" Get a list of things """
return
@test_router.post("/upload")
async def upload_file(some_file: UploadFile = File(...), keep_alive: bool = Header(None)):
""" Upload a file """
data = await some_file.read()
return (some_file.filename, some_file.content_type, data)
@test_router.post("/json_body")
def json_body(body: AModel):
""" Try sending a JSON body """
return
app.include_router(test_router, prefix="/tests", tags=["tests"])
def generate_openapi_json():
path = Path(__file__).parent / "openapi.json"
path.write_text(json.dumps(app.openapi(), indent=4))
if __name__ == "__main__":
generate_openapi_json()
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_4111_1 |
crossvul-python_data_good_3064_4 | import yaml
try:
from ansible.utils.vault import VaultLib
except ImportError:
# Ansible 2.0 has changed the vault location
from ansible.parsing.vault import VaultLib
class Vault(object):
'''R/W an ansible-vault yaml file'''
def __init__(self, password):
self.password = password
self.vault = VaultLib(password)
def load(self, stream):
'''read vault stream and return a python object'''
return yaml.safe_load(self.vault.decrypt(stream))
def dump(self, data, stream=None):
'''encrypt data and print stdout or write to stream'''
yaml_text = yaml.dump(
data,
default_flow_style=False,
allow_unicode=True)
encrypted = self.vault.encrypt(yaml_text)
if stream:
stream.write(encrypted)
else:
return encrypted
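# Sketch of the write path (hypothetical names):
#
#     vault = Vault('s3cret')
#     with open('group_vars/all.yml', 'w') as f:
#         vault.dump({'db_password': 'hunter2'}, f)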
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_3064_4 |
crossvul-python_data_bad_3064_8 | import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def _read(fname):
here = os.path.dirname(os.path.abspath(__file__))
return open(os.path.join(here, fname)).read()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
if len(sys.argv) == 2:
self.pytest_args = ['ansible_vault']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
sys.exit(pytest.main(self.pytest_args))
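# Typical invocation (flags illustrative): `python setup.py test` runs the
# ansible_vault suite; extra py.test options can be forwarded with
# `python setup.py test --pytest-args="-k decrypt"`.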
setup(
name='ansible-vault',
version='1.0.4',
author='Tomohiro NAKAMURA',
author_email='quickness.net@gmail.com',
url='https://github.com/jptomo/ansible-vault',
description='R/W an ansible-vault yaml file',
long_description=_read('README.rst'),
packages=find_packages(),
install_requires=['ansible'],
tests_require=['pytest', 'testfixtures'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
license='GPLv3',
)
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_3064_8 |
crossvul-python_data_bad_4113_0 | import discord
from redbot.core.bot import Red
from redbot.core import checks, commands, Config
from redbot.core.i18n import cog_i18n, Translator
from redbot.core.utils._internal_utils import send_to_owners_with_prefix_replaced
from redbot.core.utils.chat_formatting import escape, pagify
from .streamtypes import (
HitboxStream,
PicartoStream,
Stream,
TwitchStream,
YoutubeStream,
)
from .errors import (
APIError,
InvalidTwitchCredentials,
InvalidYoutubeCredentials,
OfflineStream,
StreamNotFound,
StreamsError,
)
from . import streamtypes as _streamtypes
import re
import logging
import asyncio
import aiohttp
import contextlib
from datetime import datetime
from collections import defaultdict
from typing import Optional, List, Tuple, Union, Dict
_ = Translator("Streams", __file__)
log = logging.getLogger("red.core.cogs.Streams")
@cog_i18n(_)
class Streams(commands.Cog):
"""Various commands relating to streaming platforms.
You can check if a Twitch, YouTube or Picarto stream is
currently live.
"""
global_defaults = {
"refresh_timer": 300,
"tokens": {},
"streams": [],
"notified_owner_missing_twitch_secret": False,
}
guild_defaults = {
"autodelete": False,
"mention_everyone": False,
"mention_here": False,
"live_message_mention": False,
"live_message_nomention": False,
"ignore_reruns": False,
}
role_defaults = {"mention": False}
def __init__(self, bot: Red):
super().__init__()
self.config: Config = Config.get_conf(self, 26262626)
self.ttv_bearer_cache: dict = {}
self.config.register_global(**self.global_defaults)
self.config.register_guild(**self.guild_defaults)
self.config.register_role(**self.role_defaults)
self.bot: Red = bot
self.streams: List[Stream] = []
self.task: Optional[asyncio.Task] = None
self.yt_cid_pattern = re.compile("^UC[-_A-Za-z0-9]{21}[AQgw]$")
self._ready_event: asyncio.Event = asyncio.Event()
self._init_task: asyncio.Task = self.bot.loop.create_task(self.initialize())
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
def check_name_or_id(self, data: str) -> bool:
matched = self.yt_cid_pattern.fullmatch(data)
if matched is None:
return True
return False
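# e.g. check_name_or_id("UCuAXFkgsw1L7xaCfnd5JJOw") -> False (a 24-character
# channel ID), while check_name_or_id("PewDiePie") -> True (a username); the
# example ID is illustrative.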
async def initialize(self) -> None:
"""Should be called straight after cog instantiation."""
await self.bot.wait_until_ready()
try:
await self.move_api_keys()
await self.get_twitch_bearer_token()
self.streams = await self.load_streams()
self.task = self.bot.loop.create_task(self._stream_alerts())
except Exception as error:
log.exception("Failed to initialize Streams cog:", exc_info=error)
self._ready_event.set()
@commands.Cog.listener()
async def on_red_api_tokens_update(self, service_name, api_tokens):
if service_name == "twitch":
await self.get_twitch_bearer_token(api_tokens)
async def cog_before_invoke(self, ctx: commands.Context):
await self._ready_event.wait()
async def move_api_keys(self) -> None:
"""Move the API keys from cog stored config to core bot config if they exist."""
tokens = await self.config.tokens()
youtube = await self.bot.get_shared_api_tokens("youtube")
twitch = await self.bot.get_shared_api_tokens("twitch")
for token_type, token in tokens.items():
if token_type == "YoutubeStream" and "api_key" not in youtube:
await self.bot.set_shared_api_tokens("youtube", api_key=token)
if token_type == "TwitchStream" and "client_id" not in twitch:
# Don't need to check Community since they're set the same
await self.bot.set_shared_api_tokens("twitch", client_id=token)
await self.config.tokens.clear()
async def get_twitch_bearer_token(self, api_tokens: Optional[Dict] = None) -> None:
tokens = (
await self.bot.get_shared_api_tokens("twitch") if api_tokens is None else api_tokens
)
if tokens.get("client_id"):
notified_owner_missing_twitch_secret = (
await self.config.notified_owner_missing_twitch_secret()
)
try:
tokens["client_secret"]
if notified_owner_missing_twitch_secret is True:
await self.config.notified_owner_missing_twitch_secret.set(False)
except KeyError:
message = _(
"You need a client secret key if you want to use the Twitch API on this cog.\n"
"Follow these steps:\n"
"1. Go to this page: https://dev.twitch.tv/console/apps.\n"
'2. Click "Manage" on your application.\n'
'3. Click on "New secret".\n'
"5. Copy your client ID and your client secret into:\n"
"{command}"
"\n\n"
"Note: These tokens are sensitive and should only be used in a private channel "
"or in DM with the bot."
).format(
command="`[p]set api twitch client_id {} client_secret {}`".format(
_("<your_client_id_here>"), _("<your_client_secret_here>")
)
)
if notified_owner_missing_twitch_secret is False:
await send_to_owners_with_prefix_replaced(self.bot, message)
await self.config.notified_owner_missing_twitch_secret.set(True)
async with aiohttp.ClientSession() as session:
async with session.post(
"https://id.twitch.tv/oauth2/token",
params={
"client_id": tokens.get("client_id", ""),
"client_secret": tokens.get("client_secret", ""),
"grant_type": "client_credentials",
},
) as req:
try:
data = await req.json()
except aiohttp.ContentTypeError:
data = {}
if req.status == 200:
pass
elif req.status == 400 and data.get("message") == "invalid client":
log.error(
"Twitch API request failed authentication: set Client ID is invalid."
)
elif req.status == 403 and data.get("message") == "invalid client secret":
log.error(
"Twitch API request failed authentication: set Client Secret is invalid."
)
elif "message" in data:
log.error(
"Twitch OAuth2 API request failed with status code %s"
" and error message: %s",
req.status,
data["message"],
)
else:
log.error("Twitch OAuth2 API request failed with status code %s", req.status)
if req.status != 200:
return
self.ttv_bearer_cache = data
self.ttv_bearer_cache["expires_at"] = datetime.now().timestamp() + data.get("expires_in")
async def maybe_renew_twitch_bearer_token(self) -> None:
if self.ttv_bearer_cache:
if self.ttv_bearer_cache["expires_at"] - datetime.now().timestamp() <= 60:
await self.get_twitch_bearer_token()
@commands.command()
async def twitchstream(self, ctx: commands.Context, channel_name: str):
"""Check if a Twitch channel is live."""
await self.maybe_renew_twitch_bearer_token()
token = (await self.bot.get_shared_api_tokens("twitch")).get("client_id")
stream = TwitchStream(
name=channel_name, token=token, bearer=self.ttv_bearer_cache.get("access_token", None),
)
await self.check_online(ctx, stream)
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.guild)
async def youtubestream(self, ctx: commands.Context, channel_id_or_name: str):
"""Check if a YouTube channel is live."""
# TODO: Write up a custom check to look up cooldown set by botowner
# This check is here to avoid people spamming this command and eating up quota
apikey = await self.bot.get_shared_api_tokens("youtube")
is_name = self.check_name_or_id(channel_id_or_name)
if is_name:
stream = YoutubeStream(name=channel_id_or_name, token=apikey)
else:
stream = YoutubeStream(id=channel_id_or_name, token=apikey)
await self.check_online(ctx, stream)
@commands.command()
async def smashcast(self, ctx: commands.Context, channel_name: str):
"""Check if a smashcast channel is live."""
stream = HitboxStream(name=channel_name)
await self.check_online(ctx, stream)
@commands.command()
async def picarto(self, ctx: commands.Context, channel_name: str):
"""Check if a Picarto channel is live."""
stream = PicartoStream(name=channel_name)
await self.check_online(ctx, stream)
async def check_online(
self,
ctx: commands.Context,
stream: Union[PicartoStream, HitboxStream, YoutubeStream, TwitchStream],
):
try:
info = await stream.is_online()
except OfflineStream:
await ctx.send(_("That user is offline."))
except StreamNotFound:
await ctx.send(_("That channel doesn't seem to exist."))
except InvalidTwitchCredentials:
await ctx.send(
_("The Twitch token is either invalid or has not been set. See {command}.").format(
command=f"`{ctx.clean_prefix}streamset twitchtoken`"
)
)
except InvalidYoutubeCredentials:
await ctx.send(
_(
"The YouTube API key is either invalid or has not been set. See {command}."
).format(command=f"`{ctx.clean_prefix}streamset youtubekey`")
)
except APIError:
await ctx.send(
_("Something went wrong whilst trying to contact the stream service's API.")
)
else:
if isinstance(info, tuple):
embed, is_rerun = info
ignore_reruns = await self.config.guild(ctx.channel.guild).ignore_reruns()
if ignore_reruns and is_rerun:
await ctx.send(_("That user is offline."))
return
else:
embed = info
await ctx.send(embed=embed)
@commands.group()
@commands.guild_only()
@checks.mod_or_permissions(manage_channels=True)
async def streamalert(self, ctx: commands.Context):
"""Manage automated stream alerts."""
pass
@streamalert.group(name="twitch", invoke_without_command=True)
async def _twitch(self, ctx: commands.Context, channel_name: str = None):
"""Manage Twitch stream notifications."""
if channel_name is not None:
await ctx.invoke(self.twitch_alert_channel, channel_name)
else:
await ctx.send_help()
@_twitch.command(name="channel")
async def twitch_alert_channel(self, ctx: commands.Context, channel_name: str):
"""Toggle alerts in this channel for a Twitch stream."""
if re.fullmatch(r"<#\d+>", channel_name):
await ctx.send(
_("Please supply the name of a *Twitch* channel, not a Discord channel.")
)
return
await self.stream_alert(ctx, TwitchStream, channel_name.lower())
@streamalert.command(name="youtube")
async def youtube_alert(self, ctx: commands.Context, channel_name_or_id: str):
"""Toggle alerts in this channel for a YouTube stream."""
await self.stream_alert(ctx, YoutubeStream, channel_name_or_id)
@streamalert.command(name="smashcast")
async def smashcast_alert(self, ctx: commands.Context, channel_name: str):
"""Toggle alerts in this channel for a Smashcast stream."""
await self.stream_alert(ctx, HitboxStream, channel_name)
@streamalert.command(name="picarto")
async def picarto_alert(self, ctx: commands.Context, channel_name: str):
"""Toggle alerts in this channel for a Picarto stream."""
await self.stream_alert(ctx, PicartoStream, channel_name)
@streamalert.command(name="stop", usage="[disable_all=No]")
async def streamalert_stop(self, ctx: commands.Context, _all: bool = False):
"""Disable all stream alerts in this channel or server.
`[p]streamalert stop` will disable this channel's stream
alerts.
Do `[p]streamalert stop yes` to disable all stream alerts in
this server.
"""
streams = self.streams.copy()
local_channel_ids = [c.id for c in ctx.guild.channels]
to_remove = []
for stream in streams:
            # Iterate over a copy; stream.channels is mutated below.
            for channel_id in stream.channels.copy():
if channel_id == ctx.channel.id:
stream.channels.remove(channel_id)
elif _all and ctx.channel.id in local_channel_ids:
if channel_id in stream.channels:
stream.channels.remove(channel_id)
if not stream.channels:
to_remove.append(stream)
for stream in to_remove:
streams.remove(stream)
self.streams = streams
await self.save_streams()
if _all:
msg = _("All the stream alerts in this server have been disabled.")
else:
msg = _("All the stream alerts in this channel have been disabled.")
await ctx.send(msg)
@streamalert.command(name="list")
async def streamalert_list(self, ctx: commands.Context):
"""List all active stream alerts in this server."""
streams_list = defaultdict(list)
guild_channels_ids = [c.id for c in ctx.guild.channels]
msg = _("Active alerts:\n\n")
for stream in self.streams:
for channel_id in stream.channels:
if channel_id in guild_channels_ids:
streams_list[channel_id].append(stream.name.lower())
if not streams_list:
await ctx.send(_("There are no active alerts in this server."))
return
for channel_id, streams in streams_list.items():
channel = ctx.guild.get_channel(channel_id)
msg += "** - #{}**\n{}\n".format(channel, ", ".join(streams))
for page in pagify(msg):
await ctx.send(page)
async def stream_alert(self, ctx: commands.Context, _class, channel_name):
stream = self.get_stream(_class, channel_name)
if not stream:
token = await self.bot.get_shared_api_tokens(_class.token_name)
is_yt = _class.__name__ == "YoutubeStream"
is_twitch = _class.__name__ == "TwitchStream"
if is_yt and not self.check_name_or_id(channel_name):
stream = _class(id=channel_name, token=token)
elif is_twitch:
await self.maybe_renew_twitch_bearer_token()
stream = _class(
name=channel_name,
token=token.get("client_id"),
bearer=self.ttv_bearer_cache.get("access_token", None),
)
else:
stream = _class(name=channel_name, token=token)
try:
exists = await self.check_exists(stream)
except InvalidTwitchCredentials:
await ctx.send(
_(
"The Twitch token is either invalid or has not been set. See {command}."
).format(command=f"`{ctx.clean_prefix}streamset twitchtoken`")
)
return
except InvalidYoutubeCredentials:
await ctx.send(
_(
"The YouTube API key is either invalid or has not been set. See "
"{command}."
).format(command=f"`{ctx.clean_prefix}streamset youtubekey`")
)
return
except APIError:
await ctx.send(
_("Something went wrong whilst trying to contact the stream service's API.")
)
return
else:
if not exists:
await ctx.send(_("That channel doesn't seem to exist."))
return
await self.add_or_remove(ctx, stream)
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
async def streamset(self, ctx: commands.Context):
"""Manage stream alert settings."""
pass
@streamset.command(name="timer")
@checks.is_owner()
async def _streamset_refresh_timer(self, ctx: commands.Context, refresh_time: int):
"""Set stream check refresh time."""
if refresh_time < 60:
return await ctx.send(_("You cannot set the refresh timer to less than 60 seconds"))
await self.config.refresh_timer.set(refresh_time)
        await ctx.send(
            _("Refresh timer set to {refresh_time} seconds").format(refresh_time=refresh_time)
        )
@streamset.command()
@checks.is_owner()
async def twitchtoken(self, ctx: commands.Context):
"""Explain how to set the twitch token."""
message = _(
"To set the twitch API tokens, follow these steps:\n"
"1. Go to this page: https://dev.twitch.tv/dashboard/apps.\n"
"2. Click *Register Your Application*.\n"
"3. Enter a name, set the OAuth Redirect URI to `http://localhost`, and "
"select an Application Category of your choosing.\n"
"4. Click *Register*.\n"
"5. Copy your client ID and your client secret into:\n"
"{command}"
"\n\n"
"Note: These tokens are sensitive and should only be used in a private channel\n"
"or in DM with the bot.\n"
).format(
command="`{}set api twitch client_id {} client_secret {}`".format(
ctx.clean_prefix, _("<your_client_id_here>"), _("<your_client_secret_here>")
)
)
await ctx.maybe_send_embed(message)
@streamset.command()
@checks.is_owner()
async def youtubekey(self, ctx: commands.Context):
"""Explain how to set the YouTube token."""
message = _(
"To get one, do the following:\n"
"1. Create a project\n"
"(see https://support.google.com/googleapi/answer/6251787 for details)\n"
"2. Enable the YouTube Data API v3 \n"
"(see https://support.google.com/googleapi/answer/6158841 for instructions)\n"
"3. Set up your API key \n"
"(see https://support.google.com/googleapi/answer/6158862 for instructions)\n"
"4. Copy your API key and run the command "
"{command}\n\n"
"Note: These tokens are sensitive and should only be used in a private channel\n"
"or in DM with the bot.\n"
).format(
command="`{}set api youtube api_key {}`".format(
ctx.clean_prefix, _("<your_api_key_here>")
)
)
await ctx.maybe_send_embed(message)
@streamset.group()
@commands.guild_only()
async def message(self, ctx: commands.Context):
"""Manage custom message for stream alerts."""
pass
@message.command(name="mention")
@commands.guild_only()
async def with_mention(self, ctx: commands.Context, message: str = None):
"""Set stream alert message when mentions are enabled.
Use `{mention}` in the message to insert the selected mentions.
Use `{stream.name}` in the message to insert the channel or user name.
For example: `[p]streamset message mention "{mention}, {stream.name} is live!"`
"""
if message is not None:
guild = ctx.guild
await self.config.guild(guild).live_message_mention.set(message)
await ctx.send(_("Stream alert message set!"))
else:
await ctx.send_help()
@message.command(name="nomention")
@commands.guild_only()
async def without_mention(self, ctx: commands.Context, message: str = None):
"""Set stream alert message when mentions are disabled.
Use `{stream.name}` in the message to insert the channel or user name.
For example: `[p]streamset message nomention "{stream.name} is live!"`
"""
if message is not None:
guild = ctx.guild
await self.config.guild(guild).live_message_nomention.set(message)
await ctx.send(_("Stream alert message set!"))
else:
await ctx.send_help()
@message.command(name="clear")
@commands.guild_only()
async def clear_message(self, ctx: commands.Context):
"""Reset the stream alert messages in this server."""
guild = ctx.guild
await self.config.guild(guild).live_message_mention.set(False)
await self.config.guild(guild).live_message_nomention.set(False)
await ctx.send(_("Stream alerts in this server will now use the default alert message."))
@streamset.group()
@commands.guild_only()
async def mention(self, ctx: commands.Context):
"""Manage mention settings for stream alerts."""
pass
@mention.command(aliases=["everyone"])
@commands.guild_only()
async def all(self, ctx: commands.Context):
"""Toggle the `@\u200beveryone` mention."""
guild = ctx.guild
current_setting = await self.config.guild(guild).mention_everyone()
if current_setting:
await self.config.guild(guild).mention_everyone.set(False)
await ctx.send(_("`@\u200beveryone` will no longer be mentioned for stream alerts."))
else:
await self.config.guild(guild).mention_everyone.set(True)
await ctx.send(_("When a stream is live, `@\u200beveryone` will be mentioned."))
@mention.command(aliases=["here"])
@commands.guild_only()
async def online(self, ctx: commands.Context):
"""Toggle the `@\u200bhere` mention."""
guild = ctx.guild
current_setting = await self.config.guild(guild).mention_here()
if current_setting:
await self.config.guild(guild).mention_here.set(False)
await ctx.send(_("`@\u200bhere` will no longer be mentioned for stream alerts."))
else:
await self.config.guild(guild).mention_here.set(True)
await ctx.send(_("When a stream is live, `@\u200bhere` will be mentioned."))
@mention.command()
@commands.guild_only()
async def role(self, ctx: commands.Context, *, role: discord.Role):
"""Toggle a role mention."""
current_setting = await self.config.role(role).mention()
if current_setting:
await self.config.role(role).mention.set(False)
await ctx.send(
_("`@\u200b{role.name}` will no longer be mentioned for stream alerts.").format(
role=role
)
)
else:
await self.config.role(role).mention.set(True)
msg = _(
"When a stream or community is live, `@\u200b{role.name}` will be mentioned."
).format(role=role)
if not role.mentionable:
msg += " " + _(
"Since the role is not mentionable, it will be momentarily made mentionable "
"when announcing a streamalert. Please make sure I have the correct "
"permissions to manage this role, or else members of this role won't receive "
"a notification."
)
await ctx.send(msg)
@streamset.command()
@commands.guild_only()
async def autodelete(self, ctx: commands.Context, on_off: bool):
"""Toggle alert deletion for when streams go offline."""
await self.config.guild(ctx.guild).autodelete.set(on_off)
if on_off:
await ctx.send(_("The notifications will be deleted once streams go offline."))
else:
await ctx.send(_("Notifications will no longer be deleted."))
@streamset.command(name="ignorereruns")
@commands.guild_only()
async def ignore_reruns(self, ctx: commands.Context):
"""Toggle excluding rerun streams from alerts."""
guild = ctx.guild
current_setting = await self.config.guild(guild).ignore_reruns()
if current_setting:
await self.config.guild(guild).ignore_reruns.set(False)
await ctx.send(_("Streams of type 'rerun' will be included in alerts."))
else:
await self.config.guild(guild).ignore_reruns.set(True)
await ctx.send(_("Streams of type 'rerun' will no longer send an alert."))
async def add_or_remove(self, ctx: commands.Context, stream):
if ctx.channel.id not in stream.channels:
stream.channels.append(ctx.channel.id)
if stream not in self.streams:
self.streams.append(stream)
await ctx.send(
_(
"I'll now send a notification in this channel when {stream.name} is live."
).format(stream=stream)
)
else:
stream.channels.remove(ctx.channel.id)
if not stream.channels:
self.streams.remove(stream)
await ctx.send(
_(
"I won't send notifications about {stream.name} in this channel anymore."
).format(stream=stream)
)
await self.save_streams()
def get_stream(self, _class, name):
for stream in self.streams:
# if isinstance(stream, _class) and stream.name == name:
# return stream
# Reloading this cog causes an issue with this check ^
# isinstance will always return False
# As a workaround, we'll compare the class' name instead.
# Good enough.
if _class.__name__ == "YoutubeStream" and stream.type == _class.__name__:
# Because name could be a username or a channel id
if self.check_name_or_id(name) and stream.name.lower() == name.lower():
return stream
elif not self.check_name_or_id(name) and stream.id == name:
return stream
elif stream.type == _class.__name__ and stream.name.lower() == name.lower():
return stream
@staticmethod
async def check_exists(stream):
try:
await stream.is_online()
except OfflineStream:
pass
except StreamNotFound:
return False
except StreamsError:
raise
return True
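    # Minimal usage sketch (hypothetical call site): check_exists() collapses
    # the lookup outcomes into a boolean, re-raising only service errors.
    #
    #     exists = await self.check_exists(stream)
    #     # True  -> channel exists (online or offline)
    #     # False -> StreamNotFound was raised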
async def _stream_alerts(self):
await self.bot.wait_until_ready()
while True:
try:
await self.check_streams()
except asyncio.CancelledError:
pass
await asyncio.sleep(await self.config.refresh_timer())
async def check_streams(self):
for stream in self.streams:
with contextlib.suppress(Exception):
try:
if stream.__class__.__name__ == "TwitchStream":
await self.maybe_renew_twitch_bearer_token()
embed, is_rerun = await stream.is_online()
else:
embed = await stream.is_online()
is_rerun = False
except OfflineStream:
if not stream._messages_cache:
continue
for message in stream._messages_cache:
with contextlib.suppress(Exception):
if await self.bot.cog_disabled_in_guild(self, message.guild):
continue
autodelete = await self.config.guild(message.guild).autodelete()
if autodelete:
await message.delete()
stream._messages_cache.clear()
await self.save_streams()
else:
if stream._messages_cache:
continue
for channel_id in stream.channels:
channel = self.bot.get_channel(channel_id)
if not channel:
continue
if await self.bot.cog_disabled_in_guild(self, channel.guild):
continue
ignore_reruns = await self.config.guild(channel.guild).ignore_reruns()
if ignore_reruns and is_rerun:
continue
mention_str, edited_roles = await self._get_mention_str(channel.guild)
if mention_str:
alert_msg = await self.config.guild(
channel.guild
).live_message_mention()
if alert_msg:
content = alert_msg.format(mention=mention_str, stream=stream)
else:
content = _("{mention}, {stream} is live!").format(
mention=mention_str,
stream=escape(
str(stream.name), mass_mentions=True, formatting=True
),
)
else:
alert_msg = await self.config.guild(
channel.guild
).live_message_nomention()
if alert_msg:
content = alert_msg.format(stream=stream)
else:
content = _("{stream} is live!").format(
stream=escape(
str(stream.name), mass_mentions=True, formatting=True
)
)
m = await channel.send(content, embed=embed)
stream._messages_cache.append(m)
if edited_roles:
for role in edited_roles:
await role.edit(mentionable=False)
await self.save_streams()
async def _get_mention_str(self, guild: discord.Guild) -> Tuple[str, List[discord.Role]]:
"""Returns a 2-tuple with the string containing the mentions, and a list of
all roles which need to have their `mentionable` property set back to False.
"""
settings = self.config.guild(guild)
mentions = []
edited_roles = []
if await settings.mention_everyone():
mentions.append("@everyone")
if await settings.mention_here():
mentions.append("@here")
can_manage_roles = guild.me.guild_permissions.manage_roles
for role in guild.roles:
if await self.config.role(role).mention():
if can_manage_roles and not role.mentionable:
try:
await role.edit(mentionable=True)
except discord.Forbidden:
# Might still be unable to edit role based on hierarchy
pass
else:
edited_roles.append(role)
mentions.append(role.mention)
return " ".join(mentions), edited_roles
async def filter_streams(self, streams: list, channel: discord.TextChannel) -> list:
filtered = []
for stream in streams:
tw_id = str(stream["channel"]["_id"])
for alert in self.streams:
if isinstance(alert, TwitchStream) and alert.id == tw_id:
if channel.id in alert.channels:
break
else:
filtered.append(stream)
return filtered
async def load_streams(self):
streams = []
for raw_stream in await self.config.streams():
_class = getattr(_streamtypes, raw_stream["type"], None)
if not _class:
continue
raw_msg_cache = raw_stream["messages"]
raw_stream["_messages_cache"] = []
for raw_msg in raw_msg_cache:
chn = self.bot.get_channel(raw_msg["channel"])
if chn is not None:
try:
msg = await chn.fetch_message(raw_msg["message"])
except discord.HTTPException:
pass
else:
raw_stream["_messages_cache"].append(msg)
token = await self.bot.get_shared_api_tokens(_class.token_name)
if token:
if _class.__name__ == "TwitchStream":
raw_stream["token"] = token.get("client_id")
raw_stream["bearer"] = self.ttv_bearer_cache.get("access_token", None)
else:
raw_stream["token"] = token
streams.append(_class(**raw_stream))
return streams
async def save_streams(self):
raw_streams = []
for stream in self.streams:
raw_streams.append(stream.export())
await self.config.streams.set(raw_streams)
def cog_unload(self):
if self.task:
self.task.cancel()
__del__ = cog_unload
| ./CrossVul/dataset_final_sorted/CWE-94/py/bad_4113_0 |
crossvul-python_data_good_663_0 | # -*- coding: utf-8 -*-
"""
eve.io.mongo.parser
~~~~~~~~~~~~~~~~~~~
This module implements a Python-to-Mongo syntax parser. Allows the MongoDB
data-layer to seamlessly respond to a Python-like query.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import ast
import sys
from datetime import datetime # noqa
from bson import ObjectId # noqa
def parse(expression):
""" Given a python-like conditional statement, returns the equivalent
mongo-like query expression. Conditional and boolean operators (==, <=, >=,
!=, >, <) along with a couple function calls (ObjectId(), datetime()) are
supported.
"""
v = MongoVisitor()
try:
v.visit(ast.parse(expression))
except SyntaxError as e:
e = ParseError(e)
e.__traceback__ = sys.exc_info()[2]
raise e
return v.mongo_query
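# A minimal usage sketch (the query string below is illustrative):
#
#     parse('name == "john" and age > 30')
#     # -> {'$and': [{'name': 'john'}, {'age': {'$gt': 30}}]}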
class ParseError(ValueError):
pass
class MongoVisitor(ast.NodeVisitor):
""" Implements the python-to-mongo parser. Only Python conditional
statements are supported, however nested, combined with most common compare
and boolean operators (And and Or).
Supported compare operators: ==, >, <, !=, >=, <=
Supported boolean operators: And, Or
"""
op_mapper = {
ast.Eq: '',
ast.Gt: '$gt',
ast.GtE: '$gte',
ast.Lt: '$lt',
ast.LtE: '$lte',
ast.NotEq: '$ne',
ast.Or: '$or',
ast.And: '$and'
}
def visit_Module(self, node):
""" Module handler, our entry point.
"""
self.mongo_query = {}
self.ops = []
self.current_value = None
# perform the magic.
self.generic_visit(node)
# if we didn't obtain a query, it is likely that an unsupported
# python expression has been passed.
if self.mongo_query == {}:
raise ParseError("Only conditional statements with boolean "
"(and, or) and comparison operators are "
"supported.")
def visit_Expr(self, node):
""" Make sure that we are parsing compare or boolean operators
"""
if not (isinstance(node.value, ast.Compare) or
isinstance(node.value, ast.BoolOp)):
raise ParseError("Will only parse conditional statements")
self.generic_visit(node)
def visit_Compare(self, node):
""" Compare operator handler.
"""
self.visit(node.left)
left = self.current_value
operator = self.op_mapper[node.ops[0].__class__] if node.ops else None
if node.comparators:
comparator = node.comparators[0]
self.visit(comparator)
if operator != '':
value = {operator: self.current_value}
else:
value = self.current_value
if self.ops:
self.ops[-1].append({left: value})
else:
self.mongo_query[left] = value
def visit_BoolOp(self, node):
""" Boolean operator handler.
"""
op = self.op_mapper[node.op.__class__]
self.ops.append([])
for value in node.values:
self.visit(value)
c = self.ops.pop()
if self.ops:
self.ops[-1].append({op: c})
else:
self.mongo_query[op] = c
def visit_Call(self, node):
""" A couple function calls are supported: bson's ObjectId() and
datetime().
"""
if isinstance(node.func, ast.Name):
if node.func.id == 'ObjectId':
try:
self.current_value = ObjectId(node.args[0].s)
                except Exception:
pass
elif node.func.id == 'datetime':
values = []
for arg in node.args:
values.append(arg.n)
try:
self.current_value = datetime(*values)
                except Exception:
pass
def visit_Attribute(self, node):
""" Attribute handler ('Contact.Id').
"""
self.visit(node.value)
self.current_value += "." + node.attr
def visit_Name(self, node):
""" Names handler.
"""
self.current_value = node.id
def visit_Num(self, node):
""" Numbers handler.
"""
self.current_value = node.n
def visit_Str(self, node):
""" Strings handler.
"""
self.current_value = node.s
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_663_0 |
crossvul-python_data_good_3064_8 | import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def _read(fname):
here = os.path.dirname(os.path.abspath(__file__))
return open(os.path.join(here, fname)).read()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
if len(sys.argv) == 2:
self.pytest_args = ['ansible_vault']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
sys.exit(pytest.main(self.pytest_args))
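# Usage sketch (assumed invocations; exact behavior depends on the installed
# setuptools/pytest versions):
#
#     python setup.py test                # defaults to testing ansible_vault
#     python setup.py test -a "-x -v"     # forwards args via --pytest-args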
setup(
name='ansible-vault',
version='1.0.5',
author='Tomohiro NAKAMURA',
author_email='quickness.net@gmail.com',
url='https://github.com/tomoh1r/ansible-vault',
description='R/W an ansible-vault yaml file',
long_description=_read('README.rst'),
packages=find_packages(),
install_requires=['ansible'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
license='GPLv3',
    extras_require={
'test': ['pytest', 'testfixtures'],
}
)
| ./CrossVul/dataset_final_sorted/CWE-94/py/good_3064_8 |
crossvul-python_data_bad_997_1 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._compat import text_type
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers import BaseRequest as Request
from ..wrappers import BaseResponse as Response
from .console import Console
from .repr import debug_repr as _debug_repr
from .tbtools import get_current_traceback
from .tbtools import render_console_html
def debug_repr(*args, **kwargs):
import warnings
warnings.warn(
"'debug_repr' has moved to 'werkzeug.debug.repr.debug_repr'"
" as of version 0.7. This old import will be removed in version"
" 1.0.",
DeprecationWarning,
stacklevel=2,
)
return _debug_repr(*args, **kwargs)
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
if isinstance(pin, text_type):
pin = pin.encode("utf-8", "replace")
return hashlib.md5(pin + b"shittysalt").hexdigest()[:12]
_machine_id = None
def get_machine_id():
global _machine_id
rv = _machine_id
if rv is not None:
return rv
def _generate():
# Potential sources of secret information on linux. The machine-id
# is stable across boots, the boot id is not
for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
try:
with open(filename, "rb") as f:
return f.readline().strip()
except IOError:
continue
# On OS X we can use the computer's serial number assuming that
# ioreg exists and can spit out that information.
try:
# Also catch import errors: subprocess may not be available, e.g.
# Google App Engine
# See https://github.com/pallets/werkzeug/issues/925
from subprocess import Popen, PIPE
dump = Popen(
["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
).communicate()[0]
match = re.search(b'"serial-number" = <([^>]+)', dump)
if match is not None:
return match.group(1)
except (OSError, ImportError):
pass
# On Windows we can use winreg to get the machine guid
wr = None
try:
import winreg as wr
except ImportError:
try:
import _winreg as wr
except ImportError:
pass
if wr is not None:
try:
with wr.OpenKey(
wr.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Cryptography",
0,
wr.KEY_READ | wr.KEY_WOW64_64KEY,
) as rk:
machineGuid, wrType = wr.QueryValueEx(rk, "MachineGuid")
if wrType == wr.REG_SZ:
return machineGuid.encode("utf-8")
else:
return machineGuid
except WindowsError:
pass
_machine_id = rv = _generate()
return rv
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get("WERKZEUG_DEBUG_PIN")
rv = None
num = None
# Pin was explicitly disabled
if pin == "off":
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace("-", "").isdigit():
# If there are separators in the pin, return it directly
if "-" in pin:
rv = pin
else:
num = pin
modname = getattr(app, "__module__", app.__class__.__module__)
try:
# getuser imports the pwd module, which does not exist in Google
# App Engine. It may also raise a KeyError if the UID does not
# have a username, such as in Docker.
username = getpass.getuser()
except (ImportError, KeyError):
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, "__name__", app.__class__.__name__),
getattr(mod, "__file__", None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [str(uuid.getnode()), get_machine_id()]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode("utf-8")
h.update(bit)
h.update(b"cookiesalt")
cookie_name = "__wzd" + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
# end up with the same value and generate out 9 digits
if num is None:
h.update(b"pinsalt")
num = ("%09d" % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = "-".join(
num[x : x + group_size].rjust(group_size, "0")
for x in range(0, len(num), group_size)
)
break
else:
rv = num
return rv, cookie_name
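# Illustrative return shape (values are made up):
#
#     pin, cookie_name = get_pin_and_cookie_name(app)
#     # pin == "123-456-789" (digits grouped for readability), or None
#     # cookie_name == "__wzd" + 20 hex chars derived from the bits above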
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in the
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(
self,
app,
evalex=False,
request_key="werkzeug.request",
console_path="/console",
console_init_func=None,
show_hidden_frames=False,
lodgeit_url=None,
pin_security=True,
pin_logging=True,
):
if lodgeit_url is not None:
from warnings import warn
warn(
"'lodgeit_url' is no longer used as of version 0.9 and"
" will be removed in version 1.0. Werkzeug uses"
" https://gist.github.com/ instead.",
DeprecationWarning,
stacklevel=2,
)
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
_log("warning", " * Debugger is active!")
if self.pin is None:
_log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
else:
_log("info", " * Debugger PIN: %s" % self.pin)
else:
self.pin = None
def _get_pin(self):
if not hasattr(self, "_pin"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
def _set_pin(self, value):
self._pin = value
pin = property(_get_pin, _set_pin)
del _get_pin, _set_pin
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, "close"):
app_iter.close()
except Exception:
if hasattr(app_iter, "close"):
app_iter.close()
traceback = get_current_traceback(
skip=1,
show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True,
)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response(
"500 INTERNAL SERVER ERROR",
[
("Content-Type", "text/html; charset=utf-8"),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
("X-XSS-Protection", "0"),
],
)
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ["wsgi.errors"].write(
"Debugging middleware caught exception in streamed "
"response at a point where response headers were already "
"sent.\n"
)
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(
evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
).encode("utf-8", "replace")
traceback.log(environ["wsgi.errors"])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype="application/json")
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join("shared", basename(filename))
try:
data = pkgutil.get_data(__package__, filename)
except OSError:
data = None
if data is not None:
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
return Response(data, mimetype=mimetype)
return Response("Not Found", status=404)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
ts, pin_hash = val.split("|", 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self):
time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
            entered_pin = request.args.get("pin", "")
            if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log(
"info", " * To enable the debugger you need to enter the security pin:"
)
_log("info", " * Debugger pin code: %s" % self.pin)
return Response("")
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get("__debugger__") == "yes":
cmd = request.args.get("cmd")
arg = request.args.get("f")
secret = request.args.get("s")
traceback = self.tracebacks.get(request.args.get("tb", type=int))
frame = self.frames.get(request.args.get("frm", type=int))
if cmd == "resource" and arg:
response = self.get_resource(request, arg)
elif cmd == "paste" and traceback is not None and secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == "pinauth" and secret == self.secret:
response = self.pin_auth(request)
elif cmd == "printpin" and secret == self.secret:
response = self.log_pin_request()
elif (
self.evalex
and cmd is not None
and frame is not None
and self.secret == secret
and self.check_pin_trust(environ)
):
response = self.execute_command(request, cmd, frame)
elif (
self.evalex
and self.console_path is not None
and request.path == self.console_path
):
response = self.display_console(request)
return response(environ, start_response)
| ./CrossVul/dataset_final_sorted/CWE-331/py/bad_997_1 |
crossvul-python_data_good_997_1 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._compat import text_type
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers import BaseRequest as Request
from ..wrappers import BaseResponse as Response
from .console import Console
from .repr import debug_repr as _debug_repr
from .tbtools import get_current_traceback
from .tbtools import render_console_html
def debug_repr(*args, **kwargs):
import warnings
warnings.warn(
"'debug_repr' has moved to 'werkzeug.debug.repr.debug_repr'"
" as of version 0.7. This old import will be removed in version"
" 1.0.",
DeprecationWarning,
stacklevel=2,
)
return _debug_repr(*args, **kwargs)
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
if isinstance(pin, text_type):
pin = pin.encode("utf-8", "replace")
return hashlib.md5(pin + b"shittysalt").hexdigest()[:12]
_machine_id = None
def get_machine_id():
global _machine_id
rv = _machine_id
if rv is not None:
return rv
def _generate():
# docker containers share the same machine id, get the
# container id instead
try:
with open("/proc/self/cgroup") as f:
value = f.readline()
except IOError:
pass
else:
value = value.strip().partition("/docker/")[2]
if value:
return value
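            # Illustrative first line of /proc/self/cgroup inside a Docker
            # container (the 64-hex id is made up):
            #
            #     12:cpuset:/docker/8e1a2b...c3d4
            #
            # partition("/docker/")[2] yields that id.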
# Potential sources of secret information on linux. The machine-id
# is stable across boots, the boot id is not
for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
try:
with open(filename, "rb") as f:
return f.readline().strip()
except IOError:
continue
# On OS X we can use the computer's serial number assuming that
# ioreg exists and can spit out that information.
try:
# Also catch import errors: subprocess may not be available, e.g.
# Google App Engine
# See https://github.com/pallets/werkzeug/issues/925
from subprocess import Popen, PIPE
dump = Popen(
["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
).communicate()[0]
match = re.search(b'"serial-number" = <([^>]+)', dump)
if match is not None:
return match.group(1)
except (OSError, ImportError):
pass
# On Windows we can use winreg to get the machine guid
wr = None
try:
import winreg as wr
except ImportError:
try:
import _winreg as wr
except ImportError:
pass
if wr is not None:
try:
with wr.OpenKey(
wr.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Cryptography",
0,
wr.KEY_READ | wr.KEY_WOW64_64KEY,
) as rk:
machineGuid, wrType = wr.QueryValueEx(rk, "MachineGuid")
if wrType == wr.REG_SZ:
return machineGuid.encode("utf-8")
else:
return machineGuid
except WindowsError:
pass
_machine_id = rv = _generate()
return rv
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get("WERKZEUG_DEBUG_PIN")
rv = None
num = None
# Pin was explicitly disabled
if pin == "off":
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace("-", "").isdigit():
# If there are separators in the pin, return it directly
if "-" in pin:
rv = pin
else:
num = pin
modname = getattr(app, "__module__", app.__class__.__module__)
try:
# getuser imports the pwd module, which does not exist in Google
# App Engine. It may also raise a KeyError if the UID does not
# have a username, such as in Docker.
username = getpass.getuser()
except (ImportError, KeyError):
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, "__name__", app.__class__.__name__),
getattr(mod, "__file__", None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [str(uuid.getnode()), get_machine_id()]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode("utf-8")
h.update(bit)
h.update(b"cookiesalt")
cookie_name = "__wzd" + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
# end up with the same value and generate out 9 digits
if num is None:
h.update(b"pinsalt")
num = ("%09d" % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = "-".join(
num[x : x + group_size].rjust(group_size, "0")
for x in range(0, len(num), group_size)
)
break
else:
rv = num
return rv, cookie_name
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in the
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(
self,
app,
evalex=False,
request_key="werkzeug.request",
console_path="/console",
console_init_func=None,
show_hidden_frames=False,
lodgeit_url=None,
pin_security=True,
pin_logging=True,
):
if lodgeit_url is not None:
from warnings import warn
warn(
"'lodgeit_url' is no longer used as of version 0.9 and"
" will be removed in version 1.0. Werkzeug uses"
" https://gist.github.com/ instead.",
DeprecationWarning,
stacklevel=2,
)
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
_log("warning", " * Debugger is active!")
if self.pin is None:
_log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
else:
_log("info", " * Debugger PIN: %s" % self.pin)
else:
self.pin = None
def _get_pin(self):
if not hasattr(self, "_pin"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
def _set_pin(self, value):
self._pin = value
pin = property(_get_pin, _set_pin)
del _get_pin, _set_pin
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, "close"):
app_iter.close()
except Exception:
if hasattr(app_iter, "close"):
app_iter.close()
traceback = get_current_traceback(
skip=1,
show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True,
)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response(
"500 INTERNAL SERVER ERROR",
[
("Content-Type", "text/html; charset=utf-8"),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
("X-XSS-Protection", "0"),
],
)
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ["wsgi.errors"].write(
"Debugging middleware caught exception in streamed "
"response at a point where response headers were already "
"sent.\n"
)
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(
evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
).encode("utf-8", "replace")
traceback.log(environ["wsgi.errors"])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype="application/json")
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join("shared", basename(filename))
try:
data = pkgutil.get_data(__package__, filename)
except OSError:
data = None
if data is not None:
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
return Response(data, mimetype=mimetype)
return Response("Not Found", status=404)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
ts, pin_hash = val.split("|", 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self):
time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
            entered_pin = request.args.get("pin", "")
            if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log(
"info", " * To enable the debugger you need to enter the security pin:"
)
_log("info", " * Debugger pin code: %s" % self.pin)
return Response("")
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get("__debugger__") == "yes":
cmd = request.args.get("cmd")
arg = request.args.get("f")
secret = request.args.get("s")
traceback = self.tracebacks.get(request.args.get("tb", type=int))
frame = self.frames.get(request.args.get("frm", type=int))
if cmd == "resource" and arg:
response = self.get_resource(request, arg)
elif cmd == "paste" and traceback is not None and secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == "pinauth" and secret == self.secret:
response = self.pin_auth(request)
elif cmd == "printpin" and secret == self.secret:
response = self.log_pin_request()
elif (
self.evalex
and cmd is not None
and frame is not None
and self.secret == secret
and self.check_pin_trust(environ)
):
response = self.execute_command(request, cmd, frame)
elif (
self.evalex
and self.console_path is not None
and request.path == self.console_path
):
response = self.display_console(request)
return response(environ, start_response)
| ./CrossVul/dataset_final_sorted/CWE-331/py/good_997_1 |
crossvul-python_data_bad_4331_0 | import sys
import ldap # pylint: disable=import-error
from flask import current_app, jsonify, request
from flask_cors import cross_origin
from alerta.auth.utils import create_token, get_customers
from alerta.exceptions import ApiError
from alerta.models.permission import Permission
from alerta.models.user import User
from alerta.utils.audit import auth_audit_trail
from . import auth
@auth.route('/auth/login', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def login():
# Allow LDAP server to use a self signed certificate
if current_app.config['LDAP_ALLOW_SELF_SIGNED_CERT']:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
# Retrieve required fields from client request
try:
login = request.json.get('username', None) or request.json['email']
password = request.json['password']
except KeyError:
raise ApiError("must supply 'username' and 'password'", 401)
try:
if '\\' in login:
domain, username = login.split('\\')
email = ''
email_verified = False
else:
username, domain = login.split('@')
email = login
email_verified = True
except ValueError:
raise ApiError('expected username with domain', 401)
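    # Illustrative inputs accepted above (values are made up):
    #
    #     "EXAMPLE\\jdoe"    -> domain="EXAMPLE", username="jdoe",
    #                           email resolved from LDAP later
    #     "jdoe@example.com" -> username="jdoe", domain="example.com",
    #                           email="jdoe@example.com" (treated as verified)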
# Validate LDAP domain
if domain not in current_app.config['LDAP_DOMAINS']:
raise ApiError('unauthorized domain', 403)
userdn = current_app.config['LDAP_DOMAINS'][domain] % username
# Attempt LDAP AUTH
try:
trace_level = 2 if current_app.debug else 0
ldap_connection = ldap.initialize(current_app.config['LDAP_URL'], trace_level=trace_level)
ldap_connection.simple_bind_s(userdn, password)
except ldap.INVALID_CREDENTIALS:
raise ApiError('invalid username or password', 401)
except Exception as e:
raise ApiError(str(e), 500)
# Get email address from LDAP
if not email_verified:
try:
ldap_result = ldap_connection.search_s(userdn, ldap.SCOPE_SUBTREE, '(objectClass=*)', ['mail'])
email = ldap_result[0][1]['mail'][0].decode(sys.stdout.encoding)
email_verified = True
except Exception:
email = '{}@{}'.format(username, domain)
# Create user if not yet there
user = User.find_by_username(username=login)
if not user:
user = User(name=username, login=login, password='', email=email,
roles=[], text='LDAP user', email_verified=email_verified)
try:
user = user.create()
except Exception as e:
            raise ApiError(str(e), 500)
# Assign customers & update last login time
groups = list()
try:
groups_filters = current_app.config.get('LDAP_DOMAINS_GROUP', {})
base_dns = current_app.config.get('LDAP_DOMAINS_BASEDN', {})
if domain in groups_filters and domain in base_dns:
resultID = ldap_connection.search(
base_dns[domain],
ldap.SCOPE_SUBTREE,
groups_filters[domain].format(username=username, email=email, userdn=userdn),
['cn']
)
resultTypes, results = ldap_connection.result(resultID)
for _dn, attributes in results:
groups.append(attributes['cn'][0].decode('utf-8'))
except ldap.LDAPError as e:
raise ApiError(str(e), 500)
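    # Assumed config shape for the group lookup above (the filter string is
    # illustrative; any LDAP filter using the {username}/{email}/{userdn}
    # placeholders works):
    #
    #     LDAP_DOMAINS_BASEDN = {"example.com": "dc=example,dc=com"}
    #     LDAP_DOMAINS_GROUP = {
    #         "example.com": "(&(objectClass=group)(member={userdn}))",
    #     }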
# Check user is active
if user.status != 'active':
raise ApiError('User {} not active'.format(login), 403)
user.update_last_login()
scopes = Permission.lookup(login=login, roles=user.roles + groups)
customers = get_customers(login=login, groups=[user.domain] + groups)
auth_audit_trail.send(current_app._get_current_object(), event='basic-ldap-login', message='user login via LDAP',
user=login, customers=customers, scopes=scopes, roles=user.roles, groups=groups,
resource_id=user.id, type='user', request=request)
# Generate token
token = create_token(user_id=user.id, name=user.name, login=user.email, provider='ldap',
customers=customers, scopes=scopes, roles=user.roles, groups=groups,
email=user.email, email_verified=user.email_verified)
return jsonify(token=token.tokenize)
| ./CrossVul/dataset_final_sorted/CWE-287/py/bad_4331_0 |
crossvul-python_data_good_1224_5 | from typing import Union, Optional, Dict, Any, List
import ujson
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from django.shortcuts import redirect, render
from django.conf import settings
from zerver.decorator import require_realm_admin, require_member_or_admin
from zerver.forms import CreateUserForm
from zerver.lib.events import get_raw_user_data
from zerver.lib.actions import do_change_avatar_fields, do_change_bot_owner, \
do_change_is_admin, do_change_default_all_public_streams, \
do_change_default_events_register_stream, do_change_default_sending_stream, \
do_create_user, do_deactivate_user, do_reactivate_user, do_regenerate_api_key, \
check_change_full_name, notify_created_bot, do_update_outgoing_webhook_service, \
do_update_bot_config_data, check_change_bot_full_name, do_change_is_guest, \
do_update_user_custom_profile_data_if_changed, check_remove_custom_profile_field_value
from zerver.lib.avatar import avatar_url, get_gravatar_url
from zerver.lib.bot_config import set_bot_config
from zerver.lib.exceptions import CannotDeactivateLastUserError
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_error, json_success
from zerver.lib.storage import static_path
from zerver.lib.streams import access_stream_by_name
from zerver.lib.upload import upload_avatar_image
from zerver.lib.users import get_api_key
from zerver.lib.validator import check_bool, check_string, check_int, check_url, check_dict, check_list
from zerver.lib.users import check_valid_bot_type, check_bot_creation_policy, \
check_full_name, check_short_name, check_valid_interface_type, check_valid_bot_config, \
access_bot_by_id, add_service, access_user_by_id, check_bot_name_available, \
validate_user_custom_profile_data
from zerver.lib.utils import generate_api_key, generate_random_token
from zerver.models import UserProfile, Stream, Message, email_allowed_for_realm, \
get_user_by_delivery_email, Service, get_user_including_cross_realm, \
DomainNotAllowedForRealmError, DisposableEmailError, get_user_profile_by_id_in_realm, \
EmailContainsPlusError, get_user_by_id_in_realm_including_cross_realm, Realm, \
InvalidFakeEmailDomain
def deactivate_user_backend(request: HttpRequest, user_profile: UserProfile,
user_id: int) -> HttpResponse:
target = access_user_by_id(user_profile, user_id)
if check_last_admin(target):
return json_error(_('Cannot deactivate the only organization administrator'))
return _deactivate_user_profile_backend(request, user_profile, target)
def deactivate_user_own_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if UserProfile.objects.filter(realm=user_profile.realm, is_active=True).count() == 1:
raise CannotDeactivateLastUserError(is_last_admin=False)
if user_profile.is_realm_admin and check_last_admin(user_profile):
raise CannotDeactivateLastUserError(is_last_admin=True)
do_deactivate_user(user_profile, acting_user=user_profile)
return json_success()
def check_last_admin(user_profile: UserProfile) -> bool:
admins = set(user_profile.realm.get_human_admin_users())
return user_profile.is_realm_admin and not user_profile.is_bot and len(admins) == 1
def deactivate_bot_backend(request: HttpRequest, user_profile: UserProfile,
bot_id: int) -> HttpResponse:
target = access_bot_by_id(user_profile, bot_id)
return _deactivate_user_profile_backend(request, user_profile, target)
def _deactivate_user_profile_backend(request: HttpRequest, user_profile: UserProfile,
target: UserProfile) -> HttpResponse:
do_deactivate_user(target, acting_user=user_profile)
return json_success()
def reactivate_user_backend(request: HttpRequest, user_profile: UserProfile,
user_id: int) -> HttpResponse:
target = access_user_by_id(user_profile, user_id, allow_deactivated=True, allow_bots=True)
if target.is_bot:
assert target.bot_type is not None
check_bot_creation_policy(user_profile, target.bot_type)
do_reactivate_user(target, acting_user=user_profile)
return json_success()
@has_request_variables
def update_user_backend(request: HttpRequest, user_profile: UserProfile, user_id: int,
full_name: Optional[str]=REQ(default="", validator=check_string),
is_admin: Optional[bool]=REQ(default=None, validator=check_bool),
is_guest: Optional[bool]=REQ(default=None, validator=check_bool),
profile_data: Optional[List[Dict[str, Union[int, str, List[int]]]]]=
REQ(default=None,
validator=check_list(check_dict([('id', check_int)])))) -> HttpResponse:
target = access_user_by_id(user_profile, user_id, allow_deactivated=True, allow_bots=True)
    # UserProfile tracks guest and administrator status in two separate
    # fields, is_guest and is_realm_admin.  This condition rejects any
    # request whose net result (combining the requested values with the
    # target's current flags) would mark a user as both a guest and a
    # realm administrator.
    # Once we update the frontend to just send a 'role' value, we can
    # remove this check.
if (((is_guest is None and target.is_guest) or is_guest) and
((is_admin is None and target.is_realm_admin) or is_admin)):
return json_error(_("Guests cannot be organization administrators"))
if is_admin is not None and target.is_realm_admin != is_admin:
if not is_admin and check_last_admin(user_profile):
return json_error(_('Cannot remove the only organization administrator'))
do_change_is_admin(target, is_admin)
if is_guest is not None and target.is_guest != is_guest:
do_change_is_guest(target, is_guest)
if (full_name is not None and target.full_name != full_name and
full_name.strip() != ""):
# We don't respect `name_changes_disabled` here because the request
# is on behalf of the administrator.
check_change_full_name(target, full_name, user_profile)
if profile_data is not None:
clean_profile_data = []
for entry in profile_data:
if not entry["value"]:
field_id = entry["id"]
check_remove_custom_profile_field_value(target, field_id)
else:
clean_profile_data.append(entry)
validate_user_custom_profile_data(target.realm.id, clean_profile_data)
do_update_user_custom_profile_data_if_changed(target, clean_profile_data)
return json_success()
def avatar(request: HttpRequest, user_profile: UserProfile,
email_or_id: str, medium: bool=False) -> HttpResponse:
"""Accepts an email address or user ID and returns the avatar"""
is_email = False
try:
int(email_or_id)
except ValueError:
is_email = True
try:
realm = user_profile.realm
if is_email:
avatar_user_profile = get_user_including_cross_realm(email_or_id, realm)
else:
avatar_user_profile = get_user_by_id_in_realm_including_cross_realm(int(email_or_id), realm)
# If there is a valid user account passed in, use its avatar
url = avatar_url(avatar_user_profile, medium=medium)
except UserProfile.DoesNotExist:
# If there is no such user, treat it as a new gravatar
email = email_or_id
avatar_version = 1
url = get_gravatar_url(email, avatar_version, medium)
    # We can rely on the URL already having query parameters: because our
    # templates depend on being able to append parameters with a plain '&',
    # get_avatar_url adds a dummy '?x=x' so we never have to decode and
    # re-encode the query string here.
assert url is not None
assert '?' in url
url += '&' + request.META['QUERY_STRING']
return redirect(url)
def get_stream_name(stream: Optional[Stream]) -> Optional[str]:
if stream:
return stream.name
return None
@require_member_or_admin
@has_request_variables
def patch_bot_backend(
request: HttpRequest, user_profile: UserProfile, bot_id: int,
full_name: Optional[str]=REQ(default=None),
bot_owner_id: Optional[int]=REQ(validator=check_int, default=None),
config_data: Optional[Dict[str, str]]=REQ(default=None,
validator=check_dict(value_validator=check_string)),
service_payload_url: Optional[str]=REQ(validator=check_url, default=None),
service_interface: Optional[int]=REQ(validator=check_int, default=1),
default_sending_stream: Optional[str]=REQ(default=None),
default_events_register_stream: Optional[str]=REQ(default=None),
default_all_public_streams: Optional[bool]=REQ(default=None, validator=check_bool)
) -> HttpResponse:
bot = access_bot_by_id(user_profile, bot_id)
if full_name is not None:
check_change_bot_full_name(bot, full_name, user_profile)
if bot_owner_id is not None:
try:
owner = get_user_profile_by_id_in_realm(bot_owner_id, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('Failed to change owner, no such user'))
if not owner.is_active:
return json_error(_('Failed to change owner, user is deactivated'))
if owner.is_bot:
return json_error(_("Failed to change owner, bots can't own other bots"))
previous_owner = bot.bot_owner
if previous_owner != owner:
do_change_bot_owner(bot, owner, user_profile)
if default_sending_stream is not None:
if default_sending_stream == "":
stream = None # type: Optional[Stream]
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_sending_stream)
do_change_default_sending_stream(bot, stream)
if default_events_register_stream is not None:
if default_events_register_stream == "":
stream = None
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_events_register_stream)
do_change_default_events_register_stream(bot, stream)
if default_all_public_streams is not None:
do_change_default_all_public_streams(bot, default_all_public_streams)
if service_payload_url is not None:
check_valid_interface_type(service_interface)
assert service_interface is not None
do_update_outgoing_webhook_service(bot, service_interface, service_payload_url)
if config_data is not None:
do_update_bot_config_data(bot, config_data)
if len(request.FILES) == 0:
pass
elif len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot)
avatar_source = UserProfile.AVATAR_FROM_USER
do_change_avatar_fields(bot, avatar_source)
else:
return json_error(_("You may only upload one file at a time"))
    json_result = dict(
        full_name=bot.full_name,
        avatar_url=avatar_url(bot),
        service_interface=service_interface,
        service_payload_url=service_payload_url,
        config_data=config_data,
        default_sending_stream=get_stream_name(bot.default_sending_stream),
        default_events_register_stream=get_stream_name(bot.default_events_register_stream),
        default_all_public_streams=bot.default_all_public_streams,
    )
    # Don't include the bot owner when it is not set; default bots have no owner.
if bot.bot_owner is not None:
json_result['bot_owner'] = bot.bot_owner.email
return json_success(json_result)
@require_member_or_admin
@has_request_variables
def regenerate_bot_api_key(request: HttpRequest, user_profile: UserProfile, bot_id: int) -> HttpResponse:
bot = access_bot_by_id(user_profile, bot_id)
new_api_key = do_regenerate_api_key(bot, user_profile)
json_result = dict(
api_key=new_api_key
)
return json_success(json_result)
@require_member_or_admin
@has_request_variables
def add_bot_backend(
request: HttpRequest, user_profile: UserProfile,
full_name_raw: str=REQ("full_name"), short_name_raw: str=REQ("short_name"),
bot_type: int=REQ(validator=check_int, default=UserProfile.DEFAULT_BOT),
payload_url: Optional[str]=REQ(validator=check_url, default=""),
service_name: Optional[str]=REQ(default=None),
config_data: Dict[str, str]=REQ(default={},
validator=check_dict(value_validator=check_string)),
interface_type: int=REQ(validator=check_int, default=Service.GENERIC),
default_sending_stream_name: Optional[str]=REQ('default_sending_stream', default=None),
default_events_register_stream_name: Optional[str]=REQ('default_events_register_stream',
default=None),
default_all_public_streams: Optional[bool]=REQ(validator=check_bool, default=None)
) -> HttpResponse:
short_name = check_short_name(short_name_raw)
if bot_type != UserProfile.INCOMING_WEBHOOK_BOT:
service_name = service_name or short_name
short_name += "-bot"
full_name = check_full_name(full_name_raw)
try:
email = '%s@%s' % (short_name, user_profile.realm.get_bot_domain())
except InvalidFakeEmailDomain:
return json_error(_("Can't create bots until FAKE_EMAIL_DOMAIN is correctly configured.\n"
"Please contact your server administrator."))
form = CreateUserForm({'full_name': full_name, 'email': email})
if bot_type == UserProfile.EMBEDDED_BOT:
if not settings.EMBEDDED_BOTS_ENABLED:
return json_error(_("Embedded bots are not enabled."))
if service_name not in [bot.name for bot in EMBEDDED_BOTS]:
return json_error(_("Invalid embedded bot name."))
if not form.is_valid():
# We validate client-side as well
return json_error(_('Bad name or username'))
try:
get_user_by_delivery_email(email, user_profile.realm)
return json_error(_("Username already in use"))
except UserProfile.DoesNotExist:
pass
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=full_name,
)
check_bot_creation_policy(user_profile, bot_type)
check_valid_bot_type(user_profile, bot_type)
check_valid_interface_type(interface_type)
if len(request.FILES) == 0:
avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
elif len(request.FILES) != 1:
return json_error(_("You may only upload one file at a time"))
else:
avatar_source = UserProfile.AVATAR_FROM_USER
default_sending_stream = None
if default_sending_stream_name is not None:
(default_sending_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_sending_stream_name)
default_events_register_stream = None
if default_events_register_stream_name is not None:
(default_events_register_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_events_register_stream_name)
if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT) and service_name:
check_valid_bot_config(bot_type, service_name, config_data)
bot_profile = do_create_user(email=email, password=None,
realm=user_profile.realm, full_name=full_name,
short_name=short_name,
bot_type=bot_type,
bot_owner=user_profile,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
if len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot_profile)
if bot_type in (UserProfile.OUTGOING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
        assert isinstance(service_name, str)
add_service(name=service_name,
user_profile=bot_profile,
base_url=payload_url,
interface=interface_type,
token=generate_api_key())
if bot_type == UserProfile.INCOMING_WEBHOOK_BOT and service_name:
set_bot_config(bot_profile, "integration_id", service_name)
if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
notify_created_bot(bot_profile)
api_key = get_api_key(bot_profile)
json_result = dict(
api_key=api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success(json_result)
@require_member_or_admin
def get_bots_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
bot_profiles = bot_profiles.select_related('default_sending_stream', 'default_events_register_stream')
bot_profiles = bot_profiles.order_by('date_joined')
def bot_info(bot_profile: UserProfile) -> Dict[str, Any]:
default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)
        # Bots are supposed to have only one API key, at least for now.
        # Therefore we can safely assume that the one and only valid API key
        # will be the first one.
api_key = get_api_key(bot_profile)
return dict(
username=bot_profile.email,
full_name=bot_profile.full_name,
api_key=api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success({'bots': list(map(bot_info, bot_profiles))})
@has_request_variables
def get_members_backend(request: HttpRequest, user_profile: UserProfile,
include_custom_profile_fields: bool=REQ(validator=check_bool,
default=False),
client_gravatar: bool=REQ(validator=check_bool, default=False)
) -> HttpResponse:
'''
The client_gravatar field here is set to True if clients can compute
their own gravatars, which saves us bandwidth. We want to eventually
make this the default behavior, but we have old clients that expect
the server to compute this for us.
'''
realm = user_profile.realm
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
# If email addresses are only available to administrators,
# clients cannot compute gravatars, so we force-set it to false.
client_gravatar = False
members = get_raw_user_data(realm,
user_profile=user_profile,
client_gravatar=client_gravatar,
include_custom_profile_fields=include_custom_profile_fields)
return json_success({'members': members.values()})
@require_realm_admin
@has_request_variables
def create_user_backend(request: HttpRequest, user_profile: UserProfile,
email: str=REQ(), password: str=REQ(), full_name_raw: str=REQ("full_name"),
short_name: str=REQ()) -> HttpResponse:
full_name = check_full_name(full_name_raw)
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
return json_error(_('Bad name or username'))
# Check that the new user's email address belongs to the admin's realm
# (Since this is an admin API, we don't require the user to have been
# invited first.)
realm = user_profile.realm
try:
email_allowed_for_realm(email, user_profile.realm)
except DomainNotAllowedForRealmError:
return json_error(_("Email '%(email)s' not allowed in this organization") %
{'email': email})
except DisposableEmailError:
return json_error(_("Disposable email addresses are not allowed in this organization"))
except EmailContainsPlusError:
return json_error(_("Email addresses containing + are not allowed."))
try:
get_user_by_delivery_email(email, user_profile.realm)
return json_error(_("Email '%s' already in use") % (email,))
except UserProfile.DoesNotExist:
pass
do_create_user(email, password, realm, full_name, short_name)
return json_success()
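# Opaque random identifier returned to API clients; it only needs to be
# unpredictable, not persistent.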
def generate_client_id() -> str:
return generate_random_token(32)
def get_profile_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1,
user_id = user_profile.id,
avatar_url = avatar_url(user_profile),
full_name = user_profile.full_name,
email = user_profile.email,
is_bot = user_profile.is_bot,
is_admin = user_profile.is_realm_admin,
short_name = user_profile.short_name)
if not user_profile.is_bot:
custom_profile_field_values = user_profile.customprofilefieldvalue_set.all()
profile_data = dict() # type: Dict[int, Dict[str, Any]]
for profile_field in custom_profile_field_values:
if profile_field.field.is_renderable():
profile_data[profile_field.field_id] = {
"value": profile_field.value,
"rendered_value": profile_field.rendered_value
}
else:
profile_data[profile_field.field_id] = {
"value": profile_field.value
}
result["profile_data"] = profile_data
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
def team_view(request: HttpRequest) -> HttpResponse:
with open(static_path('generated/github-contributors.json')) as f:
data = ujson.load(f)
return render(
request,
'zerver/team.html',
context={
'page_params': {
'contrib': data['contrib'],
},
'date': data['date'],
},
)
| ./CrossVul/dataset_final_sorted/CWE-287/py/good_1224_5 |
crossvul-python_data_bad_3282_0 | #!/usr/bin/python
from k5test import *
# Skip this test if pkinit wasn't built.
if not os.path.exists(os.path.join(plugins, 'preauth', 'pkinit.so')):
skip_rest('PKINIT tests', 'PKINIT module not built')
# Check if soft-pkcs11.so is available.
try:
import ctypes
lib = ctypes.LibraryLoader(ctypes.CDLL).LoadLibrary('soft-pkcs11.so')
del lib
have_soft_pkcs11 = True
except Exception:
have_soft_pkcs11 = False
# Construct a krb5.conf fragment configuring pkinit.
certs = os.path.join(srctop, 'tests', 'dejagnu', 'pkinit-certs')
ca_pem = os.path.join(certs, 'ca.pem')
kdc_pem = os.path.join(certs, 'kdc.pem')
user_pem = os.path.join(certs, 'user.pem')
privkey_pem = os.path.join(certs, 'privkey.pem')
privkey_enc_pem = os.path.join(certs, 'privkey-enc.pem')
user_p12 = os.path.join(certs, 'user.p12')
user_enc_p12 = os.path.join(certs, 'user-enc.p12')
user_upn_p12 = os.path.join(certs, 'user-upn.p12')
user_upn2_p12 = os.path.join(certs, 'user-upn2.p12')
user_upn3_p12 = os.path.join(certs, 'user-upn3.p12')
path = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs')
path_enc = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs-enc')
pkinit_krb5_conf = {'realms': {'$realm': {
'pkinit_anchors': 'FILE:%s' % ca_pem}}}
pkinit_kdc_conf = {'realms': {'$realm': {
'default_principal_flags': '+preauth',
'pkinit_eku_checking': 'none',
'pkinit_identity': 'FILE:%s,%s' % (kdc_pem, privkey_pem),
'pkinit_indicator': ['indpkinit1', 'indpkinit2']}}}
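# pkinit_indicator attaches the listed authentication indicators to tickets
# obtained via PKINIT; the './adata' invocations below check for their
# presence (or absence) as authorization-data element 97.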
restrictive_kdc_conf = {'realms': {'$realm': {
'restrict_anonymous_to_tgt': 'true' }}}
testprincs = {'krbtgt/KRBTEST.COM': {'keys': 'aes128-cts'},
'user': {'keys': 'aes128-cts', 'flags': '+preauth'},
'user2': {'keys': 'aes128-cts', 'flags': '+preauth'}}
alias_kdc_conf = {'realms': {'$realm': {
'default_principal_flags': '+preauth',
'pkinit_eku_checking': 'none',
'pkinit_allow_upn': 'true',
'pkinit_identity': 'FILE:%s,%s' % (kdc_pem, privkey_pem),
'database_module': 'test'}},
'dbmodules': {'test': {
'db_library': 'test',
'alias': {'user@krbtest.com': 'user'},
'princs': testprincs}}}
file_identity = 'FILE:%s,%s' % (user_pem, privkey_pem)
file_enc_identity = 'FILE:%s,%s' % (user_pem, privkey_enc_pem)
dir_identity = 'DIR:%s' % path
dir_enc_identity = 'DIR:%s' % path_enc
dir_file_identity = 'FILE:%s,%s' % (os.path.join(path, 'user.crt'),
os.path.join(path, 'user.key'))
dir_file_enc_identity = 'FILE:%s,%s' % (os.path.join(path_enc, 'user.crt'),
os.path.join(path_enc, 'user.key'))
p12_identity = 'PKCS12:%s' % user_p12
p12_upn_identity = 'PKCS12:%s' % user_upn_p12
p12_upn2_identity = 'PKCS12:%s' % user_upn2_p12
p12_upn3_identity = 'PKCS12:%s' % user_upn3_p12
p12_enc_identity = 'PKCS12:%s' % user_enc_p12
p11_identity = 'PKCS11:soft-pkcs11.so'
p11_token_identity = ('PKCS11:module_name=soft-pkcs11.so:'
'slotid=1:token=SoftToken (token)')
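# The identity strings above cover the PKINIT identity types exercised below:
# FILE: (PEM cert and key), DIR: (a directory of cert/key pairs), PKCS12:
# (a bundled cert and key), and PKCS11: (a PKCS#11 module, here soft-pkcs11.so).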
# Start a realm with the test kdb module for the following UPN SAN tests.
realm = K5Realm(krb5_conf=pkinit_krb5_conf, kdc_conf=alias_kdc_conf,
create_kdb=False)
realm.start_kdc()
# Compatibility check: cert contains UPN "user", which matches the
# request principal user@KRBTEST.COM if parsed as a normal principal.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn2_identity])
# Compatibility check: cert contains UPN "user@KRBTEST.COM", which matches
# the request principal user@KRBTEST.COM if parsed as a normal principal.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn3_identity])
# Cert contains UPN "user@krbtest.com" which is aliased to the request
# principal.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn_identity])
# Test an id-pkinit-san match to a post-canonical principal.
realm.kinit('user@krbtest.com',
flags=['-E', '-X', 'X509_user_identity=%s' % p12_identity])
# Test a UPN match to a post-canonical principal. (This only works
# for the cert with the UPN containing just "user", as we don't allow
# UPN reparsing when comparing to the canonicalized client principal.)
realm.kinit('user@krbtest.com',
flags=['-E', '-X', 'X509_user_identity=%s' % p12_upn2_identity])
# Test a mismatch.
msg = 'kinit: Client name mismatch while getting initial credentials'
realm.run([kinit, '-X', 'X509_user_identity=%s' % p12_upn2_identity, 'user2'],
expected_code=1, expected_msg=msg)
realm.stop()
realm = K5Realm(krb5_conf=pkinit_krb5_conf, kdc_conf=pkinit_kdc_conf,
get_creds=False)
# Sanity check - password-based preauth should still work.
realm.run(['./responder', '-r', 'password=%s' % password('user'),
realm.user_princ])
realm.kinit(realm.user_princ, password=password('user'))
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Test anonymous PKINIT.
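# (kinit -n requests anonymous tickets; the KDC can only issue them once the
# WELLKNOWN/ANONYMOUS principal exists, so the first attempt fails.)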
realm.kinit('@%s' % realm.realm, flags=['-n'], expected_code=1,
expected_msg='not found in Kerberos database')
realm.addprinc('WELLKNOWN/ANONYMOUS')
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.klist('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS')
realm.run([kvno, realm.host_princ])
out = realm.run(['./adata', realm.host_princ])
if '97:' in out:
fail('auth indicators seen in anonymous PKINIT ticket')
# Test anonymous kadmin.
with open(os.path.join(realm.testdir, 'acl'), 'a') as f:
    f.write('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS a *')
realm.start_kadmind()
realm.run([kadmin, '-n', 'addprinc', '-pw', 'test', 'testadd'])
realm.run([kadmin, '-n', 'getprinc', 'testadd'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
realm.stop_kadmind()
# Test with anonymous restricted; FAST should work but kvno should fail.
r_env = realm.special_env('restrict', True, kdc_conf=restrictive_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=r_env)
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.kinit('@%s' % realm.realm, flags=['-n', '-T', realm.ccache])
realm.run([kvno, realm.host_princ], expected_code=1,
expected_msg='KDC policy rejects request')
# Regression test for #8458: S4U2Self requests crash the KDC if
# anonymous is restricted.
realm.kinit(realm.host_princ, flags=['-k'])
realm.run([kvno, '-U', 'user', realm.host_princ])
# Go back to a normal KDC and disable anonymous PKINIT.
realm.stop_kdc()
realm.start_kdc()
realm.run([kadminl, 'delprinc', 'WELLKNOWN/ANONYMOUS'])
# Run the basic test - PKINIT with FILE: identity, with no password on the key.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % file_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Try again using RSA instead of DH.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity,
'-X', 'flag_RSA_PROTOCOL=yes'])
realm.klist(realm.user_princ)
# Test a DH parameter renegotiation by temporarily setting a 4096-bit
# minimum on the KDC. (Preauth type 16 is PKINIT PA_PK_AS_REQ;
# 109 is PKINIT TD_DH_PARAMETERS; 133 is FAST PA-FX-COOKIE.)
minbits_kdc_conf = {'realms': {'$realm': {'pkinit_dh_min_bits': '4096'}}}
minbits_env = realm.special_env('restrict', True, kdc_conf=minbits_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=minbits_env)
expected_trace = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Preauth module pkinit (16) (real) returned: 0/Success',
'Produced preauth for next request: 133, 16',
'/Key parameters not accepted',
'Preauth tryagain input types (16): 109, 133',
'trying again with KDC-provided parameters',
'Preauth module pkinit (16) tryagain returned: 0/Success',
'Followup preauth for next request: 16, 133')
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity],
expected_trace=expected_trace)
realm.stop_kdc()
realm.start_kdc()
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
realm.run(['./adata', realm.host_princ],
expected_msg='+97: [indpkinit1, indpkinit2]')
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity,
realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % file_enc_identity,
'-p', '%s=%s' % (file_enc_identity, 'encrypted'), realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with no password on the key.
os.mkdir(path)
os.mkdir(path_enc)
shutil.copy(privkey_pem, os.path.join(path, 'user.key'))
shutil.copy(privkey_enc_pem, os.path.join(path_enc, 'user.key'))
shutil.copy(user_pem, os.path.join(path, 'user.crt'))
shutil.copy(user_pem, os.path.join(path_enc, 'user.crt'))
realm.run(['./responder', '-x', 'pkinit=', '-X',
'X509_user_identity=%s' % dir_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % dir_enc_identity,
'-p', '%s=%s' % (dir_file_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with no password on the bundle.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % p12_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p12_enc_identity,
'-p', '%s=%s' % (p12_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
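# The pkinit_cert_match string attribute restricts which client certificates
# may authenticate as a principal.  A rule is a sequence of components --
# <SUBJECT> and <SAN> take regular expressions, <KU> a list of required
# key-usage bits -- optionally prefixed with '&&' (all components must
# match, the default) or '||' (any one component must match).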
# Match a single rule.
rule = '<SAN>^user@KRBTEST.COM$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
# Match a combined rule (default prefix is &&).
rule = '<SUBJECT>CN=user$<KU>digitalSignature,keyEncipherment'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
# Fail an && rule.
rule = '&&<SUBJECT>O=OTHER.COM<SAN>^user@KRBTEST.COM$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
msg = 'kinit: Certificate mismatch while getting initial credentials'
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity],
expected_code=1, expected_msg=msg)
# Pass an || rule.
rule = '||<SUBJECT>O=KRBTEST.COM<SAN>^otheruser@KRBTEST.COM$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
# Fail an || rule.
rule = '||<SUBJECT>O=OTHER.COM<SAN>^otheruser@KRBTEST.COM$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
msg = 'kinit: Certificate mismatch while getting initial credentials'
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity],
expected_code=1, expected_msg=msg)
if not have_soft_pkcs11:
skip_rest('PKINIT PKCS11 tests', 'soft-pkcs11.so not found')
softpkcs11rc = os.path.join(os.getcwd(), 'testdir', 'soft-pkcs11.rc')
realm.env['SOFTPKCS11RC'] = softpkcs11rc
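# soft-pkcs11.so reads its token contents from the file named by SOFTPKCS11RC;
# each tab-separated line appears to supply an object label, the token name,
# and the certificate and private-key files to expose.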
# PKINIT with PKCS11: identity, with no need for a PIN.
with open(softpkcs11rc, 'w') as conf:
    conf.write("%s\t%s\t%s\t%s\n" % ('user', 'user token', user_pem, privkey_pem))
# Expect to succeed without having to supply any more information.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS11: identity, with a PIN supplied by the prompter.
os.remove(softpkcs11rc)
with open(softpkcs11rc, 'w') as conf:
    conf.write("%s\t%s\t%s\t%s\n" % ('user', 'user token', user_pem,
                                     privkey_enc_pem))
# Expect failure if the responder does nothing, and there's no prompter
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p11_token_identity,
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Supply the wrong PIN, and verify that we ignore the draft9 padata offer
# in the KDC method data after RFC 4556 PKINIT fails.
expected_trace = ('PKINIT client has no configured identity; giving up',
'PKINIT client ignoring draft 9 offer from RFC 4556 KDC')
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity],
password='wrong', expected_code=1, expected_trace=expected_trace)
# PKINIT with PKCS11: identity, with a PIN supplied by the responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p11_token_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p11_token_identity,
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p11_identity,
'-p', '%s=%s' % (p11_token_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
success('PKINIT tests')
| ./CrossVul/dataset_final_sorted/CWE-287/py/bad_3282_0 |
crossvul-python_data_good_1224_2 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-21 01:47
from __future__ import unicode_literals
from django.db import migrations
from typing import Any, List
class Migration(migrations.Migration):
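    # No schema changes: this empty migration exists to merge two branches of
    # the zerver migration graph (0253 and 0209).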
dependencies = [
('zerver', '0253_userprofile_wildcard_mentions_notify'),
('zerver', '0209_user_profile_no_empty_password'),
]
operations = [
] # type: List[Any]
| ./CrossVul/dataset_final_sorted/CWE-287/py/good_1224_2 |